utils: Move logging-related code into separate file
[ganeti-github.git] / lib / utils / __init__.py
1 #
2 #
3
4 # Copyright (C) 2006, 2007, 2010, 2011 Google Inc.
5 #
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
10 #
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
15 #
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 # 02110-1301, USA.
20
21
22 """Ganeti utility module.
23
24 This module holds functions that can be used in both daemons (all) and
25 the command line scripts.
26
27 """
28
29
30 import os
31 import sys
32 import time
33 import subprocess
34 import re
35 import socket
36 import tempfile
37 import shutil
38 import errno
39 import pwd
40 import itertools
41 import select
42 import fcntl
43 import resource
44 import logging
45 import signal
46 import OpenSSL
47 import datetime
48 import calendar
49 import hmac
50
51 from cStringIO import StringIO
52
53 from ganeti import errors
54 from ganeti import constants
55 from ganeti import compat
56
57 from ganeti.utils.algo import * # pylint: disable-msg=W0401
58 from ganeti.utils.retry import * # pylint: disable-msg=W0401
59 from ganeti.utils.text import * # pylint: disable-msg=W0401
60 from ganeti.utils.mlock import * # pylint: disable-msg=W0401
61 from ganeti.utils.log import * # pylint: disable-msg=W0401
62
63 _locksheld = []
64
65 debug_locks = False
66
67 #: when set to True, L{RunCmd} is disabled
68 no_fork = False
69
70 _RANDOM_UUID_FILE = "/proc/sys/kernel/random/uuid"
71
72 HEX_CHAR_RE = r"[a-zA-Z0-9]"
73 VALID_X509_SIGNATURE_SALT = re.compile("^%s+$" % HEX_CHAR_RE, re.S)
74 X509_SIGNATURE = re.compile(r"^%s:\s*(?P<salt>%s+)/(?P<sign>%s+)$" %
75 (re.escape(constants.X509_CERT_SIGNATURE_HEADER),
76 HEX_CHAR_RE, HEX_CHAR_RE),
77 re.S | re.I)
78
79 _VALID_SERVICE_NAME_RE = re.compile("^[-_.a-zA-Z0-9]{1,128}$")
80
81 UUID_RE = re.compile('^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-'
82 '[a-f0-9]{4}-[a-f0-9]{12}$')
83
84 # Certificate verification results
85 (CERT_WARNING,
86 CERT_ERROR) = range(1, 3)
87
88 (_TIMEOUT_NONE,
89 _TIMEOUT_TERM,
90 _TIMEOUT_KILL) = range(3)
91
92 #: Shell param checker regexp
93 _SHELLPARAM_REGEX = re.compile(r"^[-a-zA-Z0-9._+/:%@]+$")
94
95 #: ASN1 time regexp
96 _ASN1_TIME_REGEX = re.compile(r"^(\d+)([-+]\d\d)(\d\d)$")
97
98
99 class RunResult(object):
100 """Holds the result of running external programs.
101
102 @type exit_code: int or None
103 @ivar exit_code: the exit code of the program, or None (if the program
104 didn't exit normally, e.g. was terminated by a signal)
105 @type signal: int or None
106 @ivar signal: the signal that caused the program to finish, or None
107 (if the program wasn't terminated by a signal)
108 @type stdout: str
109 @ivar stdout: the standard output of the program
110 @type stderr: str
111 @ivar stderr: the standard error of the program
112 @type failed: boolean
113 @ivar failed: True in case the program was
114 terminated by a signal or exited with a non-zero exit code
115 @ivar fail_reason: a string detailing the termination reason
116
117 """
118 __slots__ = ["exit_code", "signal", "stdout", "stderr",
119 "failed", "fail_reason", "cmd"]
120
121
122 def __init__(self, exit_code, signal_, stdout, stderr, cmd, timeout_action,
123 timeout):
124 self.cmd = cmd
125 self.exit_code = exit_code
126 self.signal = signal_
127 self.stdout = stdout
128 self.stderr = stderr
129 self.failed = (signal_ is not None or exit_code != 0)
130
131 fail_msgs = []
132 if self.signal is not None:
133 fail_msgs.append("terminated by signal %s" % self.signal)
134 elif self.exit_code is not None:
135 fail_msgs.append("exited with exit code %s" % self.exit_code)
136 else:
137 fail_msgs.append("unable to determine termination reason")
138
139 if timeout_action == _TIMEOUT_TERM:
140 fail_msgs.append("terminated after timeout of %.2f seconds" % timeout)
141 elif timeout_action == _TIMEOUT_KILL:
142 fail_msgs.append(("force termination after timeout of %.2f seconds"
143 " and linger for another %.2f seconds") %
144 (timeout, constants.CHILD_LINGER_TIMEOUT))
145
146 if fail_msgs and self.failed:
147 self.fail_reason = CommaJoin(fail_msgs)
148
149 if self.failed:
150 logging.debug("Command '%s' failed (%s); output: %s",
151 self.cmd, self.fail_reason, self.output)
152
153 def _GetOutput(self):
154 """Returns the combined stdout and stderr for easier usage.
155
156 """
157 return self.stdout + self.stderr
158
159 output = property(_GetOutput, None, None, "Return full output")
160
161
162 def _BuildCmdEnvironment(env, reset):
163 """Builds the environment for an external program.
164
165 """
166 if reset:
167 cmd_env = {}
168 else:
169 cmd_env = os.environ.copy()
170 cmd_env["LC_ALL"] = "C"
171
172 if env is not None:
173 cmd_env.update(env)
174
175 return cmd_env
176
177
178 def RunCmd(cmd, env=None, output=None, cwd="/", reset_env=False,
179 interactive=False, timeout=None):
180 """Execute a (shell) command.
181
182 The command should not read from its standard input, as it will be
183 closed.
184
185 @type cmd: string or list
186 @param cmd: Command to run
187 @type env: dict
188 @param env: Additional environment variables
189 @type output: str
190 @param output: if desired, the output of the command can be
191 saved in a file instead of the RunResult instance; this
192 parameter denotes the file name (if not None)
193 @type cwd: string
194 @param cwd: if specified, will be used as the working
195 directory for the command; the default will be /
196 @type reset_env: boolean
197 @param reset_env: whether to reset or keep the default os environment
198 @type interactive: boolean
199 @param interactive: whether we pipe stdin, stdout and stderr
200 (default behaviour) or run the command interactively
201 @type timeout: int
202 @param timeout: If not None, timeout in seconds until child process gets
203 killed
204 @rtype: L{RunResult}
205 @return: RunResult instance
206 @raise errors.ProgrammerError: if we call this when forks are disabled
207
208 """
209 if no_fork:
210 raise errors.ProgrammerError("utils.RunCmd() called with fork() disabled")
211
212 if output and interactive:
213 raise errors.ProgrammerError("Parameters 'output' and 'interactive' can"
214 " not be provided at the same time")
215
216 if isinstance(cmd, basestring):
217 strcmd = cmd
218 shell = True
219 else:
220 cmd = [str(val) for val in cmd]
221 strcmd = ShellQuoteArgs(cmd)
222 shell = False
223
224 if output:
225 logging.debug("RunCmd %s, output file '%s'", strcmd, output)
226 else:
227 logging.debug("RunCmd %s", strcmd)
228
229 cmd_env = _BuildCmdEnvironment(env, reset_env)
230
231 try:
232 if output is None:
233 out, err, status, timeout_action = _RunCmdPipe(cmd, cmd_env, shell, cwd,
234 interactive, timeout)
235 else:
236 timeout_action = _TIMEOUT_NONE
237 status = _RunCmdFile(cmd, cmd_env, shell, output, cwd)
238 out = err = ""
239 except OSError, err:
240 if err.errno == errno.ENOENT:
241 raise errors.OpExecError("Can't execute '%s': not found (%s)" %
242 (strcmd, err))
243 else:
244 raise
245
246 if status >= 0:
247 exitcode = status
248 signal_ = None
249 else:
250 exitcode = None
251 signal_ = -status
252
253 return RunResult(exitcode, signal_, out, err, strcmd, timeout_action, timeout)
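
# Usage sketch (not part of the original module): RunCmd returns a RunResult
# whose "failed" flag and combined "output" are the usual attributes to
# inspect; the command and timeout below are arbitrary examples.
def _ExampleRunCmdUsage():
  """Illustrative sketch only."""
  result = RunCmd(["/bin/ls", "/tmp"], timeout=10)
  if result.failed:
    logging.error("Command failed (%s): %s", result.fail_reason, result.output)
    return None
  return result.stdout.splitlines()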
254
255
256 def SetupDaemonEnv(cwd="/", umask=077):
257 """Setup a daemon's environment.
258
259 This should be called between the first and second fork, due to
260 setsid usage.
261
262 @param cwd: the directory to which to chdir
263 @param umask: the umask to setup
264
265 """
266 os.chdir(cwd)
267 os.umask(umask)
268 os.setsid()
269
270
271 def SetupDaemonFDs(output_file, output_fd):
272 """Setups up a daemon's file descriptors.
273
274 @param output_file: if not None, the file to which to redirect
275 stdout/stderr
276 @param output_fd: if not None, the file descriptor for stdout/stderr
277
278 """
279 # check that at most one is defined
280 assert [output_file, output_fd].count(None) >= 1
281
282 # Open /dev/null (read-only, only for stdin)
283 devnull_fd = os.open(os.devnull, os.O_RDONLY)
284
285 if output_fd is not None:
286 pass
287 elif output_file is not None:
288 # Open output file
289 try:
290 output_fd = os.open(output_file,
291 os.O_WRONLY | os.O_CREAT | os.O_APPEND, 0600)
292 except EnvironmentError, err:
293 raise Exception("Opening output file failed: %s" % err)
294 else:
295 output_fd = os.open(os.devnull, os.O_WRONLY)
296
297 # Redirect standard I/O
298 os.dup2(devnull_fd, 0)
299 os.dup2(output_fd, 1)
300 os.dup2(output_fd, 2)
301
302
303 def StartDaemon(cmd, env=None, cwd="/", output=None, output_fd=None,
304 pidfile=None):
305 """Start a daemon process after forking twice.
306
307 @type cmd: string or list
308 @param cmd: Command to run
309 @type env: dict
310 @param env: Additional environment variables
311 @type cwd: string
312 @param cwd: Working directory for the program
313 @type output: string
314 @param output: Path to file in which to save the output
315 @type output_fd: int
316 @param output_fd: File descriptor for output
317 @type pidfile: string
318 @param pidfile: Process ID file
319 @rtype: int
320 @return: Daemon process ID
321 @raise errors.ProgrammerError: if we call this when forks are disabled
322
323 """
324 if no_fork:
325 raise errors.ProgrammerError("utils.StartDaemon() called with fork()"
326 " disabled")
327
328 if output and not (bool(output) ^ (output_fd is not None)):
329 raise errors.ProgrammerError("Only one of 'output' and 'output_fd' can be"
330 " specified")
331
332 if isinstance(cmd, basestring):
333 cmd = ["/bin/sh", "-c", cmd]
334
335 strcmd = ShellQuoteArgs(cmd)
336
337 if output:
338 logging.debug("StartDaemon %s, output file '%s'", strcmd, output)
339 else:
340 logging.debug("StartDaemon %s", strcmd)
341
342 cmd_env = _BuildCmdEnvironment(env, False)
343
344 # Create pipe for sending PID back
345 (pidpipe_read, pidpipe_write) = os.pipe()
346 try:
347 try:
348 # Create pipe for sending error messages
349 (errpipe_read, errpipe_write) = os.pipe()
350 try:
351 try:
352 # First fork
353 pid = os.fork()
354 if pid == 0:
355 try:
356 # Child process, won't return
357 _StartDaemonChild(errpipe_read, errpipe_write,
358 pidpipe_read, pidpipe_write,
359 cmd, cmd_env, cwd,
360 output, output_fd, pidfile)
361 finally:
362 # Well, maybe child process failed
363 os._exit(1) # pylint: disable-msg=W0212
364 finally:
365 _CloseFDNoErr(errpipe_write)
366
367 # Wait for daemon to be started (or an error message to
368 # arrive) and read up to 100 KB as an error message
369 errormsg = RetryOnSignal(os.read, errpipe_read, 100 * 1024)
370 finally:
371 _CloseFDNoErr(errpipe_read)
372 finally:
373 _CloseFDNoErr(pidpipe_write)
374
375 # Read up to 128 bytes for PID
376 pidtext = RetryOnSignal(os.read, pidpipe_read, 128)
377 finally:
378 _CloseFDNoErr(pidpipe_read)
379
380 # Try to avoid zombies by waiting for child process
381 try:
382 os.waitpid(pid, 0)
383 except OSError:
384 pass
385
386 if errormsg:
387 raise errors.OpExecError("Error when starting daemon process: %r" %
388 errormsg)
389
390 try:
391 return int(pidtext)
392 except (ValueError, TypeError), err:
393 raise errors.OpExecError("Error while trying to parse PID %r: %s" %
394 (pidtext, err))
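
# Usage sketch (not part of the original module): the daemon command, log
# file and pidfile below are made-up examples; StartDaemon returns the PID
# of the detached process.
def _ExampleStartDaemonUsage():
  """Illustrative sketch only."""
  pid = StartDaemon(["/usr/sbin/mydaemon", "--foreground"],
                    output="/var/log/mydaemon.log",
                    pidfile="/var/run/mydaemon.pid")
  logging.info("Daemon started with PID %d", pid)
  return pid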
395
396
397 def _StartDaemonChild(errpipe_read, errpipe_write,
398 pidpipe_read, pidpipe_write,
399 args, env, cwd,
400 output, fd_output, pidfile):
401 """Child process for starting daemon.
402
403 """
404 try:
405 # Close parent's side
406 _CloseFDNoErr(errpipe_read)
407 _CloseFDNoErr(pidpipe_read)
408
409 # First child process
410 SetupDaemonEnv()
411
412 # And fork for the second time
413 pid = os.fork()
414 if pid != 0:
415 # Exit first child process
416 os._exit(0) # pylint: disable-msg=W0212
417
418 # Make sure pipe is closed on execv* (and thereby notifies
419 # original process)
420 SetCloseOnExecFlag(errpipe_write, True)
421
422 # List of file descriptors to be left open
423 noclose_fds = [errpipe_write]
424
425 # Open PID file
426 if pidfile:
427 fd_pidfile = WritePidFile(pidfile)
428
429 # Keeping the file open to hold the lock
430 noclose_fds.append(fd_pidfile)
431
432 SetCloseOnExecFlag(fd_pidfile, False)
433 else:
434 fd_pidfile = None
435
436 SetupDaemonFDs(output, fd_output)
437
438 # Send daemon PID to parent
439 RetryOnSignal(os.write, pidpipe_write, str(os.getpid()))
440
441 # Close all file descriptors except stdio and error message pipe
442 CloseFDs(noclose_fds=noclose_fds)
443
444 # Change working directory
445 os.chdir(cwd)
446
447 if env is None:
448 os.execvp(args[0], args)
449 else:
450 os.execvpe(args[0], args, env)
451 except: # pylint: disable-msg=W0702
452 try:
453 # Report errors to original process
454 WriteErrorToFD(errpipe_write, str(sys.exc_info()[1]))
455 except: # pylint: disable-msg=W0702
456 # Ignore errors in error handling
457 pass
458
459 os._exit(1) # pylint: disable-msg=W0212
460
461
462 def WriteErrorToFD(fd, err):
463 """Possibly write an error message to a fd.
464
465 @type fd: None or int (file descriptor)
466 @param fd: if not None, the error will be written to this fd
467 @param err: string, the error message
468
469 """
470 if fd is None:
471 return
472
473 if not err:
474 err = "<unknown error>"
475
476 RetryOnSignal(os.write, fd, err)
477
478
479 def _CheckIfAlive(child):
480 """Raises L{RetryAgain} if child is still alive.
481
482 @raises RetryAgain: If child is still alive
483
484 """
485 if child.poll() is None:
486 raise RetryAgain()
487
488
489 def _WaitForProcess(child, timeout):
490 """Waits for the child to terminate or until we reach timeout.
491
492 """
493 try:
494 Retry(_CheckIfAlive, (1.0, 1.2, 5.0), max(0, timeout), args=[child])
495 except RetryTimeout:
496 pass
497
498
499 def _RunCmdPipe(cmd, env, via_shell, cwd, interactive, timeout,
500 _linger_timeout=constants.CHILD_LINGER_TIMEOUT):
501 """Run a command and return its output.
502
503 @type cmd: string or list
504 @param cmd: Command to run
505 @type env: dict
506 @param env: The environment to use
507 @type via_shell: bool
508 @param via_shell: if we should run via the shell
509 @type cwd: string
510 @param cwd: the working directory for the program
511 @type interactive: boolean
512 @param interactive: Run command interactive (without piping)
513 @type timeout: int
514 @param timeout: Timeout in seconds after which the program gets terminated
515 @rtype: tuple
516 @return: (out, err, status, timeout_action)
517
518 """
519 poller = select.poll()
520
521 stderr = subprocess.PIPE
522 stdout = subprocess.PIPE
523 stdin = subprocess.PIPE
524
525 if interactive:
526 stderr = stdout = stdin = None
527
528 child = subprocess.Popen(cmd, shell=via_shell,
529 stderr=stderr,
530 stdout=stdout,
531 stdin=stdin,
532 close_fds=True, env=env,
533 cwd=cwd)
534
535 out = StringIO()
536 err = StringIO()
537
538 linger_timeout = None
539
540 if timeout is None:
541 poll_timeout = None
542 else:
543 poll_timeout = RunningTimeout(timeout, True).Remaining
544
545 msg_timeout = ("Command %s (%d) run into execution timeout, terminating" %
546 (cmd, child.pid))
547 msg_linger = ("Command %s (%d) run into linger timeout, killing" %
548 (cmd, child.pid))
549
550 timeout_action = _TIMEOUT_NONE
551
552 if not interactive:
553 child.stdin.close()
554 poller.register(child.stdout, select.POLLIN)
555 poller.register(child.stderr, select.POLLIN)
556 fdmap = {
557 child.stdout.fileno(): (out, child.stdout),
558 child.stderr.fileno(): (err, child.stderr),
559 }
560 for fd in fdmap:
561 SetNonblockFlag(fd, True)
562
563 while fdmap:
564 if poll_timeout:
565 pt = poll_timeout() * 1000
566 if pt < 0:
567 if linger_timeout is None:
568 logging.warning(msg_timeout)
569 if child.poll() is None:
570 timeout_action = _TIMEOUT_TERM
571 IgnoreProcessNotFound(os.kill, child.pid, signal.SIGTERM)
572 linger_timeout = RunningTimeout(_linger_timeout, True).Remaining
573 pt = linger_timeout() * 1000
574 if pt < 0:
575 break
576 else:
577 pt = None
578
579 pollresult = RetryOnSignal(poller.poll, pt)
580
581 for fd, event in pollresult:
582 if event & select.POLLIN or event & select.POLLPRI:
583 data = fdmap[fd][1].read()
584 # no data from read signifies EOF (the same as POLLHUP)
585 if not data:
586 poller.unregister(fd)
587 del fdmap[fd]
588 continue
589 fdmap[fd][0].write(data)
590 if (event & select.POLLNVAL or event & select.POLLHUP or
591 event & select.POLLERR):
592 poller.unregister(fd)
593 del fdmap[fd]
594
595 if timeout is not None:
596 assert callable(poll_timeout)
597
598 # We have no I/O left but it might still run
599 if child.poll() is None:
600 _WaitForProcess(child, poll_timeout())
601
602 # Terminate if still alive after timeout
603 if child.poll() is None:
604 if linger_timeout is None:
605 logging.warning(msg_timeout)
606 timeout_action = _TIMEOUT_TERM
607 IgnoreProcessNotFound(os.kill, child.pid, signal.SIGTERM)
608 lt = _linger_timeout
609 else:
610 lt = linger_timeout()
611 _WaitForProcess(child, lt)
612
613 # Okay, still alive after timeout and linger timeout? Kill it!
614 if child.poll() is None:
615 timeout_action = _TIMEOUT_KILL
616 logging.warning(msg_linger)
617 IgnoreProcessNotFound(os.kill, child.pid, signal.SIGKILL)
618
619 out = out.getvalue()
620 err = err.getvalue()
621
622 status = child.wait()
623 return out, err, status, timeout_action
624
625
626 def _RunCmdFile(cmd, env, via_shell, output, cwd):
627 """Run a command and save its output to a file.
628
629 @type cmd: string or list
630 @param cmd: Command to run
631 @type env: dict
632 @param env: The environment to use
633 @type via_shell: bool
634 @param via_shell: if we should run via the shell
635 @type output: str
636 @param output: the filename in which to save the output
637 @type cwd: string
638 @param cwd: the working directory for the program
639 @rtype: int
640 @return: the exit status
641
642 """
643 fh = open(output, "a")
644 try:
645 child = subprocess.Popen(cmd, shell=via_shell,
646 stderr=subprocess.STDOUT,
647 stdout=fh,
648 stdin=subprocess.PIPE,
649 close_fds=True, env=env,
650 cwd=cwd)
651
652 child.stdin.close()
653 status = child.wait()
654 finally:
655 fh.close()
656 return status
657
658
659 def SetCloseOnExecFlag(fd, enable):
660 """Sets or unsets the close-on-exec flag on a file descriptor.
661
662 @type fd: int
663 @param fd: File descriptor
664 @type enable: bool
665 @param enable: Whether to set or unset it.
666
667 """
668 flags = fcntl.fcntl(fd, fcntl.F_GETFD)
669
670 if enable:
671 flags |= fcntl.FD_CLOEXEC
672 else:
673 flags &= ~fcntl.FD_CLOEXEC
674
675 fcntl.fcntl(fd, fcntl.F_SETFD, flags)
676
677
678 def SetNonblockFlag(fd, enable):
679 """Sets or unsets the O_NONBLOCK flag on on a file descriptor.
680
681 @type fd: int
682 @param fd: File descriptor
683 @type enable: bool
684 @param enable: Whether to set or unset it
685
686 """
687 flags = fcntl.fcntl(fd, fcntl.F_GETFL)
688
689 if enable:
690 flags |= os.O_NONBLOCK
691 else:
692 flags &= ~os.O_NONBLOCK
693
694 fcntl.fcntl(fd, fcntl.F_SETFL, flags)
695
696
697 def RetryOnSignal(fn, *args, **kwargs):
698 """Calls a function again if it failed due to EINTR.
699
700 """
701 while True:
702 try:
703 return fn(*args, **kwargs)
704 except EnvironmentError, err:
705 if err.errno != errno.EINTR:
706 raise
707 except (socket.error, select.error), err:
708 # In python 2.6 and above select.error is an IOError, so it's handled
709 # above, in 2.5 and below it's not, and it's handled here.
710 if not (err.args and err.args[0] == errno.EINTR):
711 raise
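
# Usage sketch (not part of the original module): RetryOnSignal is typically
# wrapped around raw os.read/os.write calls, as done elsewhere in this file,
# so that EINTR does not abort the operation.
def _ExampleRetryOnSignalUsage(fd):
  """Illustrative sketch only; reads up to 4 kB from an already-open fd."""
  return RetryOnSignal(os.read, fd, 4096)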
712
713
714 def RunParts(dir_name, env=None, reset_env=False):
715 """Run Scripts or programs in a directory
716
717 @type dir_name: string
718 @param dir_name: absolute path to a directory
719 @type env: dict
720 @param env: The environment to use
721 @type reset_env: boolean
722 @param reset_env: whether to reset or keep the default os environment
723 @rtype: list of tuples
724 @return: list of (name, (one of the RUNPARTS_* status constants), RunResult)
725
726 """
727 rr = []
728
729 try:
730 dir_contents = ListVisibleFiles(dir_name)
731 except OSError, err:
732 logging.warning("RunParts: skipping %s (cannot list: %s)", dir_name, err)
733 return rr
734
735 for relname in sorted(dir_contents):
736 fname = PathJoin(dir_name, relname)
737 if not (os.path.isfile(fname) and os.access(fname, os.X_OK) and
738 constants.EXT_PLUGIN_MASK.match(relname) is not None):
739 rr.append((relname, constants.RUNPARTS_SKIP, None))
740 else:
741 try:
742 result = RunCmd([fname], env=env, reset_env=reset_env)
743 except Exception, err: # pylint: disable-msg=W0703
744 rr.append((relname, constants.RUNPARTS_ERR, str(err)))
745 else:
746 rr.append((relname, constants.RUNPARTS_RUN, result))
747
748 return rr
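
# Usage sketch (not part of the original module): the hook directory below is
# a made-up path; each result entry is (name, status, payload) where status
# is one of the RUNPARTS_* constants and payload is a RunResult, an error
# string or None.
def _ExampleRunPartsUsage():
  """Illustrative sketch only."""
  for (name, status, payload) in RunParts("/etc/example/hooks.d"):
    if status == constants.RUNPARTS_ERR:
      logging.error("Could not run %s: %s", name, payload)
    elif status == constants.RUNPARTS_RUN and payload.failed:
      logging.error("%s failed: %s", name, payload.output)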
749
750
751 def RemoveFile(filename):
752 """Remove a file ignoring some errors.
753
754 Remove a file, ignoring non-existing files or directories. Other
755 errors are propagated.
756
757 @type filename: str
758 @param filename: the file to be removed
759
760 """
761 try:
762 os.unlink(filename)
763 except OSError, err:
764 if err.errno not in (errno.ENOENT, errno.EISDIR):
765 raise
766
767
768 def RemoveDir(dirname):
769 """Remove an empty directory.
770
771 Remove a directory, ignoring non-existing ones.
772 Other errors are propagated; this includes the case
773 where the directory is not empty and therefore can't be removed.
774
775 @type dirname: str
776 @param dirname: the empty directory to be removed
777
778 """
779 try:
780 os.rmdir(dirname)
781 except OSError, err:
782 if err.errno != errno.ENOENT:
783 raise
784
785
786 def RenameFile(old, new, mkdir=False, mkdir_mode=0750):
787 """Renames a file.
788
789 @type old: string
790 @param old: Original path
791 @type new: string
792 @param new: New path
793 @type mkdir: bool
794 @param mkdir: Whether to create target directory if it doesn't exist
795 @type mkdir_mode: int
796 @param mkdir_mode: Mode for newly created directories
797
798 """
799 try:
800 return os.rename(old, new)
801 except OSError, err:
802 # In at least one use case of this function, the job queue, directory
803 # creation is very rare. Checking for the directory before renaming is not
804 # as efficient.
805 if mkdir and err.errno == errno.ENOENT:
806 # Create directory and try again
807 Makedirs(os.path.dirname(new), mode=mkdir_mode)
808
809 return os.rename(old, new)
810
811 raise
812
813
814 def Makedirs(path, mode=0750):
815 """Super-mkdir; create a leaf directory and all intermediate ones.
816
817 This is a wrapper around C{os.makedirs} adding error handling not implemented
818 before Python 2.5.
819
820 """
821 try:
822 os.makedirs(path, mode)
823 except OSError, err:
824 # Ignore EEXIST. This is only handled in os.makedirs as included in
825 # Python 2.5 and above.
826 if err.errno != errno.EEXIST or not os.path.exists(path):
827 raise
828
829
830 def ResetTempfileModule():
831 """Resets the random name generator of the tempfile module.
832
833 This function should be called after C{os.fork} in the child process to
834 ensure it creates a newly seeded random generator. Otherwise it would
835 generate the same random parts as the parent process. If several processes
836 race for the creation of a temporary file, this could lead to one not getting
837 a temporary name.
838
839 """
840 # pylint: disable-msg=W0212
841 if hasattr(tempfile, "_once_lock") and hasattr(tempfile, "_name_sequence"):
842 tempfile._once_lock.acquire()
843 try:
844 # Reset random name generator
845 tempfile._name_sequence = None
846 finally:
847 tempfile._once_lock.release()
848 else:
849 logging.critical("The tempfile module misses at least one of the"
850 " '_once_lock' and '_name_sequence' attributes")
851
852
853 def _FingerprintFile(filename):
854 """Compute the fingerprint of a file.
855
856 If the file does not exist, None will be returned
857 instead.
858
859 @type filename: str
860 @param filename: the filename to checksum
861 @rtype: str
862 @return: the hex digest of the SHA-1 checksum of the contents
863 of the file
864
865 """
866 if not (os.path.exists(filename) and os.path.isfile(filename)):
867 return None
868
869 f = open(filename)
870
871 fp = compat.sha1_hash()
872 while True:
873 data = f.read(4096)
874 if not data:
875 break
876
877 fp.update(data)
878
879 return fp.hexdigest()
880
881
882 def FingerprintFiles(files):
883 """Compute fingerprints for a list of files.
884
885 @type files: list
886 @param files: the list of filename to fingerprint
887 @rtype: dict
888 @return: a dictionary filename: fingerprint, holding only
889 existing files
890
891 """
892 ret = {}
893
894 for filename in files:
895 cksum = _FingerprintFile(filename)
896 if cksum:
897 ret[filename] = cksum
898
899 return ret
900
901
902 def ForceDictType(target, key_types, allowed_values=None):
903 """Force the values of a dict to have certain types.
904
905 @type target: dict
906 @param target: the dict to update
907 @type key_types: dict
908 @param key_types: dict mapping target dict keys to types
909 in constants.ENFORCEABLE_TYPES
910 @type allowed_values: list
911 @param allowed_values: list of specially allowed values
912
913 """
914 if allowed_values is None:
915 allowed_values = []
916
917 if not isinstance(target, dict):
918 msg = "Expected dictionary, got '%s'" % target
919 raise errors.TypeEnforcementError(msg)
920
921 for key in target:
922 if key not in key_types:
923 msg = "Unknown key '%s'" % key
924 raise errors.TypeEnforcementError(msg)
925
926 if target[key] in allowed_values:
927 continue
928
929 ktype = key_types[key]
930 if ktype not in constants.ENFORCEABLE_TYPES:
931 msg = "'%s' has non-enforceable type %s" % (key, ktype)
932 raise errors.ProgrammerError(msg)
933
934 if ktype in (constants.VTYPE_STRING, constants.VTYPE_MAYBE_STRING):
935 if target[key] is None and ktype == constants.VTYPE_MAYBE_STRING:
936 pass
937 elif not isinstance(target[key], basestring):
938 if isinstance(target[key], bool) and not target[key]:
939 target[key] = ''
940 else:
941 msg = "'%s' (value %s) is not a valid string" % (key, target[key])
942 raise errors.TypeEnforcementError(msg)
943 elif ktype == constants.VTYPE_BOOL:
944 if isinstance(target[key], basestring) and target[key]:
945 if target[key].lower() == constants.VALUE_FALSE:
946 target[key] = False
947 elif target[key].lower() == constants.VALUE_TRUE:
948 target[key] = True
949 else:
950 msg = "'%s' (value %s) is not a valid boolean" % (key, target[key])
951 raise errors.TypeEnforcementError(msg)
952 elif target[key]:
953 target[key] = True
954 else:
955 target[key] = False
956 elif ktype == constants.VTYPE_SIZE:
957 try:
958 target[key] = ParseUnit(target[key])
959 except errors.UnitParseError, err:
960 msg = "'%s' (value %s) is not a valid size. error: %s" % \
961 (key, target[key], err)
962 raise errors.TypeEnforcementError(msg)
963 elif ktype == constants.VTYPE_INT:
964 try:
965 target[key] = int(target[key])
966 except (ValueError, TypeError):
967 msg = "'%s' (value %s) is not a valid integer" % (key, target[key])
968 raise errors.TypeEnforcementError(msg)
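
# Usage sketch (not part of the original module): ForceDictType normalises
# the values in place; the keys below ("memory", "auto_balance") are made-up
# examples, the VTYPE_* constants are the ones referenced above.
def _ExampleForceDictTypeUsage():
  """Illustrative sketch only."""
  params = {"memory": "128M", "auto_balance": "true"}
  ForceDictType(params, {"memory": constants.VTYPE_SIZE,
                         "auto_balance": constants.VTYPE_BOOL})
  # params is now e.g. {"memory": 128, "auto_balance": True}
  return params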
969
970
971 def _GetProcStatusPath(pid):
972 """Returns the path for a PID's proc status file.
973
974 @type pid: int
975 @param pid: Process ID
976 @rtype: string
977
978 """
979 return "/proc/%d/status" % pid
980
981
982 def IsProcessAlive(pid):
983 """Check if a given pid exists on the system.
984
985 @note: zombie status is not handled, so zombie processes
986 will be returned as alive
987 @type pid: int
988 @param pid: the process ID to check
989 @rtype: boolean
990 @return: True if the process exists
991
992 """
993 def _TryStat(name):
994 try:
995 os.stat(name)
996 return True
997 except EnvironmentError, err:
998 if err.errno in (errno.ENOENT, errno.ENOTDIR):
999 return False
1000 elif err.errno == errno.EINVAL:
1001 raise RetryAgain(err)
1002 raise
1003
1004 assert isinstance(pid, int), "pid must be an integer"
1005 if pid <= 0:
1006 return False
1007
1008 # /proc in a multiprocessor environment can have strange behaviors.
1009 # Retry the os.stat a few times until we get a good result.
1010 try:
1011 return Retry(_TryStat, (0.01, 1.5, 0.1), 0.5,
1012 args=[_GetProcStatusPath(pid)])
1013 except RetryTimeout, err:
1014 err.RaiseInner()
1015
1016
1017 def _ParseSigsetT(sigset):
1018 """Parse a rendered sigset_t value.
1019
1020 This is the opposite of the Linux kernel's fs/proc/array.c:render_sigset_t
1021 function.
1022
1023 @type sigset: string
1024 @param sigset: Rendered signal set from /proc/$pid/status
1025 @rtype: set
1026 @return: Set of all enabled signal numbers
1027
1028 """
1029 result = set()
1030
1031 signum = 0
1032 for ch in reversed(sigset):
1033 chv = int(ch, 16)
1034
1035 # The following could be done in a loop, but it's easier to read and
1036 # understand in the unrolled form
1037 if chv & 1:
1038 result.add(signum + 1)
1039 if chv & 2:
1040 result.add(signum + 2)
1041 if chv & 4:
1042 result.add(signum + 3)
1043 if chv & 8:
1044 result.add(signum + 4)
1045
1046 signum += 4
1047
1048 return result
1049
1050
1051 def _GetProcStatusField(pstatus, field):
1052 """Retrieves a field from the contents of a proc status file.
1053
1054 @type pstatus: string
1055 @param pstatus: Contents of /proc/$pid/status
1056 @type field: string
1057 @param field: Name of field whose value should be returned
1058 @rtype: string
1059
1060 """
1061 for line in pstatus.splitlines():
1062 parts = line.split(":", 1)
1063
1064 if len(parts) < 2 or parts[0] != field:
1065 continue
1066
1067 return parts[1].strip()
1068
1069 return None
1070
1071
1072 def IsProcessHandlingSignal(pid, signum, status_path=None):
1073 """Checks whether a process is handling a signal.
1074
1075 @type pid: int
1076 @param pid: Process ID
1077 @type signum: int
1078 @param signum: Signal number
1079 @rtype: bool
1080
1081 """
1082 if status_path is None:
1083 status_path = _GetProcStatusPath(pid)
1084
1085 try:
1086 proc_status = ReadFile(status_path)
1087 except EnvironmentError, err:
1088 # In at least one case, reading /proc/$pid/status failed with ESRCH.
1089 if err.errno in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL, errno.ESRCH):
1090 return False
1091 raise
1092
1093 sigcgt = _GetProcStatusField(proc_status, "SigCgt")
1094 if sigcgt is None:
1095 raise RuntimeError("%s is missing 'SigCgt' field" % status_path)
1096
1097 # Now check whether signal is handled
1098 return signum in _ParseSigsetT(sigcgt)
1099
1100
1101 def ReadPidFile(pidfile):
1102 """Read a pid from a file.
1103
1104 @type pidfile: string
1105 @param pidfile: path to the file containing the pid
1106 @rtype: int
1107 @return: The process id, if the file exists and contains a valid PID,
1108 otherwise 0
1109
1110 """
1111 try:
1112 raw_data = ReadOneLineFile(pidfile)
1113 except EnvironmentError, err:
1114 if err.errno != errno.ENOENT:
1115 logging.exception("Can't read pid file")
1116 return 0
1117
1118 try:
1119 pid = int(raw_data)
1120 except (TypeError, ValueError), err:
1121 logging.info("Can't parse pid file contents", exc_info=True)
1122 return 0
1123
1124 return pid
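
# Usage sketch (not part of the original module): combining ReadPidFile with
# IsProcessAlive (defined above) is the usual way to check whether a daemon
# that wrote a pidfile is still running; the path below is a made-up example.
def _ExamplePidCheckUsage():
  """Illustrative sketch only."""
  pid = ReadPidFile("/var/run/example-daemon.pid")
  return pid > 0 and IsProcessAlive(pid)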
1125
1126
1127 def ReadLockedPidFile(path):
1128 """Reads a locked PID file.
1129
1130 This can be used together with L{StartDaemon}.
1131
1132 @type path: string
1133 @param path: Path to PID file
1134 @return: PID as integer or, if file was unlocked or couldn't be opened, None
1135
1136 """
1137 try:
1138 fd = os.open(path, os.O_RDONLY)
1139 except EnvironmentError, err:
1140 if err.errno == errno.ENOENT:
1141 # PID file doesn't exist
1142 return None
1143 raise
1144
1145 try:
1146 try:
1147 # Try to acquire lock
1148 LockFile(fd)
1149 except errors.LockError:
1150 # Couldn't lock, daemon is running
1151 return int(os.read(fd, 100))
1152 finally:
1153 os.close(fd)
1154
1155 return None
1156
1157
1158 def ValidateServiceName(name):
1159 """Validate the given service name.
1160
1161 @type name: number or string
1162 @param name: Service name or port specification
1163
1164 """
1165 try:
1166 numport = int(name)
1167 except (ValueError, TypeError):
1168 # Non-numeric service name
1169 valid = _VALID_SERVICE_NAME_RE.match(name)
1170 else:
1171 # Numeric port (protocols other than TCP or UDP might need adjustments
1172 # here)
1173 valid = (numport >= 0 and numport < (1 << 16))
1174
1175 if not valid:
1176 raise errors.OpPrereqError("Invalid service name '%s'" % name,
1177 errors.ECODE_INVAL)
1178
1179 return name
1180
1181
1182 def ListVolumeGroups():
1183 """List volume groups and their size
1184
1185 @rtype: dict
1186 @return:
1187 Dictionary with volume group names as keys and
1188 their sizes as values
1189
1190 """
1191 command = "vgs --noheadings --units m --nosuffix -o name,size"
1192 result = RunCmd(command)
1193 retval = {}
1194 if result.failed:
1195 return retval
1196
1197 for line in result.stdout.splitlines():
1198 try:
1199 name, size = line.split()
1200 size = int(float(size))
1201 except (IndexError, ValueError), err:
1202 logging.error("Invalid output from vgs (%s): %s", err, line)
1203 continue
1204
1205 retval[name] = size
1206
1207 return retval
1208
1209
1210 def BridgeExists(bridge):
1211 """Check whether the given bridge exists in the system
1212
1213 @type bridge: str
1214 @param bridge: the bridge name to check
1215 @rtype: boolean
1216 @return: True if it does
1217
1218 """
1219 return os.path.isdir("/sys/class/net/%s/bridge" % bridge)
1220
1221
1222 def TryConvert(fn, val):
1223 """Try to convert a value ignoring errors.
1224
1225 This function tries to apply function I{fn} to I{val}. If no
1226 C{ValueError} or C{TypeError} exceptions are raised, it will return
1227 the result, else it will return the original value. Any other
1228 exceptions are propagated to the caller.
1229
1230 @type fn: callable
1231 @param fn: function to apply to the value
1232 @param val: the value to be converted
1233 @return: The converted value if the conversion was successful,
1234 otherwise the original value.
1235
1236 """
1237 try:
1238 nv = fn(val)
1239 except (ValueError, TypeError):
1240 nv = val
1241 return nv
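
# Usage sketch (not part of the original module): TryConvert is handy for
# "best effort" conversions where the original value should be kept if the
# conversion fails.
def _ExampleTryConvertUsage():
  """Illustrative sketch only."""
  assert TryConvert(int, "5") == 5
  assert TryConvert(int, "five") == "five"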
1242
1243
1244 def IsValidShellParam(word):
1245 """Verifies is the given word is safe from the shell's p.o.v.
1246
1247 This means that we can pass this to a command via the shell and be
1248 sure that it doesn't alter the command line and is passed as such to
1249 the actual command.
1250
1251 Note that we are overly restrictive here, in order to be on the safe
1252 side.
1253
1254 @type word: str
1255 @param word: the word to check
1256 @rtype: boolean
1257 @return: True if the word is 'safe'
1258
1259 """
1260 return bool(_SHELLPARAM_REGEX.match(word))
1261
1262
1263 def BuildShellCmd(template, *args):
1264 """Build a safe shell command line from the given arguments.
1265
1266 This function will check all arguments in the args list so that they
1267 are valid shell parameters (i.e. they don't contain shell
1268 metacharacters). If everything is ok, it will return the result of
1269 template % args.
1270
1271 @type template: str
1272 @param template: the string holding the template for the
1273 string formatting
1274 @rtype: str
1275 @return: the expanded command line
1276
1277 """
1278 for word in args:
1279 if not IsValidShellParam(word):
1280 raise errors.ProgrammerError("Shell argument '%s' contains"
1281 " invalid characters" % word)
1282 return template % args
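
# Usage sketch (not part of the original module): the template and device
# name below are made-up examples; each argument must pass IsValidShellParam
# or a ProgrammerError is raised.
def _ExampleBuildShellCmdUsage():
  """Illustrative sketch only."""
  return BuildShellCmd("blockdev --getsize64 %s", "/dev/example-disk")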
1283
1284
1285 def ParseCpuMask(cpu_mask):
1286 """Parse a CPU mask definition and return the list of CPU IDs.
1287
1288 CPU mask format: comma-separated list of CPU IDs
1289 or dash-separated ID ranges
1290 Example: "0-2,5" -> "0,1,2,5"
1291
1292 @type cpu_mask: str
1293 @param cpu_mask: CPU mask definition
1294 @rtype: list of int
1295 @return: list of CPU IDs
1296
1297 """
1298 if not cpu_mask:
1299 return []
1300 cpu_list = []
1301 for range_def in cpu_mask.split(","):
1302 boundaries = range_def.split("-")
1303 n_elements = len(boundaries)
1304 if n_elements > 2:
1305 raise errors.ParseError("Invalid CPU ID range definition"
1306 " (only one hyphen allowed): %s" % range_def)
1307 try:
1308 lower = int(boundaries[0])
1309 except (ValueError, TypeError), err:
1310 raise errors.ParseError("Invalid CPU ID value for lower boundary of"
1311 " CPU ID range: %s" % str(err))
1312 try:
1313 higher = int(boundaries[-1])
1314 except (ValueError, TypeError), err:
1315 raise errors.ParseError("Invalid CPU ID value for higher boundary of"
1316 " CPU ID range: %s" % str(err))
1317 if lower > higher:
1318 raise errors.ParseError("Invalid CPU ID range definition"
1319 " (%d > %d): %s" % (lower, higher, range_def))
1320 cpu_list.extend(range(lower, higher + 1))
1321 return cpu_list
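
# Usage sketch (not part of the original module): the docstring example
# above, spelled out as code.
def _ExampleParseCpuMaskUsage():
  """Illustrative sketch only."""
  assert ParseCpuMask("0-2,5") == [0, 1, 2, 5]
  assert ParseCpuMask("") == []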
1322
1323
1324 def AddAuthorizedKey(file_obj, key):
1325 """Adds an SSH public key to an authorized_keys file.
1326
1327 @type file_obj: str or file handle
1328 @param file_obj: path to authorized_keys file
1329 @type key: str
1330 @param key: string containing key
1331
1332 """
1333 key_fields = key.split()
1334
1335 if isinstance(file_obj, basestring):
1336 f = open(file_obj, 'a+')
1337 else:
1338 f = file_obj
1339
1340 try:
1341 nl = True
1342 for line in f:
1343 # Ignore whitespace changes
1344 if line.split() == key_fields:
1345 break
1346 nl = line.endswith('\n')
1347 else:
1348 if not nl:
1349 f.write("\n")
1350 f.write(key.rstrip('\r\n'))
1351 f.write("\n")
1352 f.flush()
1353 finally:
1354 f.close()
1355
1356
1357 def RemoveAuthorizedKey(file_name, key):
1358 """Removes an SSH public key from an authorized_keys file.
1359
1360 @type file_name: str
1361 @param file_name: path to authorized_keys file
1362 @type key: str
1363 @param key: string containing key
1364
1365 """
1366 key_fields = key.split()
1367
1368 fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name))
1369 try:
1370 out = os.fdopen(fd, 'w')
1371 try:
1372 f = open(file_name, 'r')
1373 try:
1374 for line in f:
1375 # Ignore whitespace changes while comparing lines
1376 if line.split() != key_fields:
1377 out.write(line)
1378
1379 out.flush()
1380 os.rename(tmpname, file_name)
1381 finally:
1382 f.close()
1383 finally:
1384 out.close()
1385 except:
1386 RemoveFile(tmpname)
1387 raise
1388
1389
1390 def SetEtcHostsEntry(file_name, ip, hostname, aliases):
1391 """Sets the name of an IP address and hostname in /etc/hosts.
1392
1393 @type file_name: str
1394 @param file_name: path to the file to modify (usually C{/etc/hosts})
1395 @type ip: str
1396 @param ip: the IP address
1397 @type hostname: str
1398 @param hostname: the hostname to be added
1399 @type aliases: list
1400 @param aliases: the list of aliases to add for the hostname
1401
1402 """
1403 # Ensure aliases are unique
1404 aliases = UniqueSequence([hostname] + aliases)[1:]
1405
1406 def _WriteEtcHosts(fd):
1407 # Duplicating file descriptor because os.fdopen's result will automatically
1408 # close the descriptor, but we would still like to have its functionality.
1409 out = os.fdopen(os.dup(fd), "w")
1410 try:
1411 for line in ReadFile(file_name).splitlines(True):
1412 fields = line.split()
1413 if fields and not fields[0].startswith("#") and ip == fields[0]:
1414 continue
1415 out.write(line)
1416
1417 out.write("%s\t%s" % (ip, hostname))
1418 if aliases:
1419 out.write(" %s" % " ".join(aliases))
1420 out.write("\n")
1421 out.flush()
1422 finally:
1423 out.close()
1424
1425 WriteFile(file_name, fn=_WriteEtcHosts, mode=0644)
1426
1427
1428 def AddHostToEtcHosts(hostname, ip):
1429 """Wrapper around SetEtcHostsEntry.
1430
1431 @type hostname: str
1432 @param hostname: a hostname that will be resolved and added to
1433 L{constants.ETC_HOSTS}
1434 @type ip: str
1435 @param ip: The ip address of the host
1436
1437 """
1438 SetEtcHostsEntry(constants.ETC_HOSTS, ip, hostname, [hostname.split(".")[0]])
1439
1440
1441 def RemoveEtcHostsEntry(file_name, hostname):
1442 """Removes a hostname from /etc/hosts.
1443
1444 IP addresses without names are removed from the file.
1445
1446 @type file_name: str
1447 @param file_name: path to the file to modify (usually C{/etc/hosts})
1448 @type hostname: str
1449 @param hostname: the hostname to be removed
1450
1451 """
1452 def _WriteEtcHosts(fd):
1453 # Duplicating file descriptor because os.fdopen's result will automatically
1454 # close the descriptor, but we would still like to have its functionality.
1455 out = os.fdopen(os.dup(fd), "w")
1456 try:
1457 for line in ReadFile(file_name).splitlines(True):
1458 fields = line.split()
1459 if len(fields) > 1 and not fields[0].startswith("#"):
1460 names = fields[1:]
1461 if hostname in names:
1462 while hostname in names:
1463 names.remove(hostname)
1464 if names:
1465 out.write("%s %s\n" % (fields[0], " ".join(names)))
1466 continue
1467
1468 out.write(line)
1469
1470 out.flush()
1471 finally:
1472 out.close()
1473
1474 WriteFile(file_name, fn=_WriteEtcHosts, mode=0644)
1475
1476
1477 def RemoveHostFromEtcHosts(hostname):
1478 """Wrapper around RemoveEtcHostsEntry.
1479
1480 @type hostname: str
1481 @param hostname: hostname that will be resolved and its
1482 full and short name will be removed from
1483 L{constants.ETC_HOSTS}
1484
1485 """
1486 RemoveEtcHostsEntry(constants.ETC_HOSTS, hostname)
1487 RemoveEtcHostsEntry(constants.ETC_HOSTS, hostname.split(".")[0])
1488
1489
1490 def TimestampForFilename():
1491 """Returns the current time formatted for filenames.
1492
1493 The format doesn't contain colons as some shells and applications treat them
1494 as separators. Uses the local timezone.
1495
1496 """
1497 return time.strftime("%Y-%m-%d_%H_%M_%S")
1498
1499
1500 def CreateBackup(file_name):
1501 """Creates a backup of a file.
1502
1503 @type file_name: str
1504 @param file_name: file to be backed up
1505 @rtype: str
1506 @return: the path to the newly created backup
1507 @raise errors.ProgrammerError: for invalid file names
1508
1509 """
1510 if not os.path.isfile(file_name):
1511 raise errors.ProgrammerError("Can't make a backup of a non-file '%s'" %
1512 file_name)
1513
1514 prefix = ("%s.backup-%s." %
1515 (os.path.basename(file_name), TimestampForFilename()))
1516 dir_name = os.path.dirname(file_name)
1517
1518 fsrc = open(file_name, 'rb')
1519 try:
1520 (fd, backup_name) = tempfile.mkstemp(prefix=prefix, dir=dir_name)
1521 fdst = os.fdopen(fd, 'wb')
1522 try:
1523 logging.debug("Backing up %s at %s", file_name, backup_name)
1524 shutil.copyfileobj(fsrc, fdst)
1525 finally:
1526 fdst.close()
1527 finally:
1528 fsrc.close()
1529
1530 return backup_name
1531
1532
1533 def ListVisibleFiles(path):
1534 """Returns a list of visible files in a directory.
1535
1536 @type path: str
1537 @param path: the directory to enumerate
1538 @rtype: list
1539 @return: the list of all files not starting with a dot
1540 @raise ProgrammerError: if L{path} is not an absolute and normalized path
1541
1542 """
1543 if not IsNormAbsPath(path):
1544 raise errors.ProgrammerError("Path passed to ListVisibleFiles is not"
1545 " absolute/normalized: '%s'" % path)
1546 files = [i for i in os.listdir(path) if not i.startswith(".")]
1547 return files
1548
1549
1550 def GetHomeDir(user, default=None):
1551 """Try to get the homedir of the given user.
1552
1553 The user can be passed either as a string (denoting the name) or as
1554 an integer (denoting the user id). If the user is not found, the
1555 'default' argument is returned, which defaults to None.
1556
1557 """
1558 try:
1559 if isinstance(user, basestring):
1560 result = pwd.getpwnam(user)
1561 elif isinstance(user, (int, long)):
1562 result = pwd.getpwuid(user)
1563 else:
1564 raise errors.ProgrammerError("Invalid type passed to GetHomeDir (%s)" %
1565 type(user))
1566 except KeyError:
1567 return default
1568 return result.pw_dir
1569
1570
1571 def NewUUID():
1572 """Returns a random UUID.
1573
1574 @note: This is a Linux-specific method as it uses the /proc
1575 filesystem.
1576 @rtype: str
1577
1578 """
1579 return ReadFile(_RANDOM_UUID_FILE, size=128).rstrip("\n")
1580
1581
1582 def EnsureDirs(dirs):
1583 """Make required directories, if they don't exist.
1584
1585 @param dirs: list of tuples (dir_name, dir_mode)
1586 @type dirs: list of (string, integer)
1587
1588 """
1589 for dir_name, dir_mode in dirs:
1590 try:
1591 os.mkdir(dir_name, dir_mode)
1592 except EnvironmentError, err:
1593 if err.errno != errno.EEXIST:
1594 raise errors.GenericError("Cannot create needed directory"
1595 " '%s': %s" % (dir_name, err))
1596 try:
1597 os.chmod(dir_name, dir_mode)
1598 except EnvironmentError, err:
1599 raise errors.GenericError("Cannot change directory permissions on"
1600 " '%s': %s" % (dir_name, err))
1601 if not os.path.isdir(dir_name):
1602 raise errors.GenericError("%s is not a directory" % dir_name)
1603
1604
1605 def ReadFile(file_name, size=-1):
1606 """Reads a file.
1607
1608 @type size: int
1609 @param size: Read at most size bytes (if negative, entire file)
1610 @rtype: str
1611 @return: the (possibly partial) content of the file
1612
1613 """
1614 f = open(file_name, "r")
1615 try:
1616 return f.read(size)
1617 finally:
1618 f.close()
1619
1620
1621 def WriteFile(file_name, fn=None, data=None,
1622 mode=None, uid=-1, gid=-1,
1623 atime=None, mtime=None, close=True,
1624 dry_run=False, backup=False,
1625 prewrite=None, postwrite=None):
1626 """(Over)write a file atomically.
1627
1628 The file_name and either fn (a function taking one argument, the
1629 file descriptor, and which should write the data to it) or data (the
1630 contents of the file) must be passed. The other arguments are
1631 optional and allow setting the file mode, owner and group, and the
1632 mtime/atime of the file.
1633
1634 If the function doesn't raise an exception, it has succeeded and the
1635 target file has the new contents. If the function has raised an
1636 exception, an existing target file should be unmodified and the
1637 temporary file should be removed.
1638
1639 @type file_name: str
1640 @param file_name: the target filename
1641 @type fn: callable
1642 @param fn: content writing function, called with
1643 file descriptor as parameter
1644 @type data: str
1645 @param data: contents of the file
1646 @type mode: int
1647 @param mode: file mode
1648 @type uid: int
1649 @param uid: the owner of the file
1650 @type gid: int
1651 @param gid: the group of the file
1652 @type atime: int
1653 @param atime: a custom access time to be set on the file
1654 @type mtime: int
1655 @param mtime: a custom modification time to be set on the file
1656 @type close: boolean
1657 @param close: whether to close file after writing it
1658 @type prewrite: callable
1659 @param prewrite: function to be called before writing content
1660 @type postwrite: callable
1661 @param postwrite: function to be called after writing content
1662
1663 @rtype: None or int
1664 @return: None if the 'close' parameter evaluates to True,
1665 otherwise the file descriptor
1666
1667 @raise errors.ProgrammerError: if any of the arguments are not valid
1668
1669 """
1670 if not os.path.isabs(file_name):
1671 raise errors.ProgrammerError("Path passed to WriteFile is not"
1672 " absolute: '%s'" % file_name)
1673
1674 if [fn, data].count(None) != 1:
1675 raise errors.ProgrammerError("fn or data required")
1676
1677 if [atime, mtime].count(None) == 1:
1678 raise errors.ProgrammerError("Both atime and mtime must be either"
1679 " set or None")
1680
1681 if backup and not dry_run and os.path.isfile(file_name):
1682 CreateBackup(file_name)
1683
1684 dir_name, base_name = os.path.split(file_name)
1685 fd, new_name = tempfile.mkstemp('.new', base_name, dir_name)
1686 do_remove = True
1687 # here we need to make sure we remove the temp file, if any error
1688 # leaves it in place
1689 try:
1690 if uid != -1 or gid != -1:
1691 os.chown(new_name, uid, gid)
1692 if mode:
1693 os.chmod(new_name, mode)
1694 if callable(prewrite):
1695 prewrite(fd)
1696 if data is not None:
1697 os.write(fd, data)
1698 else:
1699 fn(fd)
1700 if callable(postwrite):
1701 postwrite(fd)
1702 os.fsync(fd)
1703 if atime is not None and mtime is not None:
1704 os.utime(new_name, (atime, mtime))
1705 if not dry_run:
1706 os.rename(new_name, file_name)
1707 do_remove = False
1708 finally:
1709 if close:
1710 os.close(fd)
1711 result = None
1712 else:
1713 result = fd
1714 if do_remove:
1715 RemoveFile(new_name)
1716
1717 return result
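
# Usage sketch (not part of the original module): the target path, contents
# and mode below are made-up examples; with backup=True an existing file is
# copied aside via CreateBackup before being replaced atomically.
def _ExampleWriteFileUsage():
  """Illustrative sketch only."""
  WriteFile("/var/lib/example/example.conf",
            data="enabled = True\n",
            mode=0644, backup=True)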
1718
1719
1720 def GetFileID(path=None, fd=None):
1721 """Returns the file 'id', i.e. the dev/inode and mtime information.
1722
1723 Either the path to the file or the fd must be given.
1724
1725 @param path: the file path
1726 @param fd: a file descriptor
1727 @return: a tuple of (device number, inode number, mtime)
1728
1729 """
1730 if [path, fd].count(None) != 1:
1731 raise errors.ProgrammerError("One and only one of fd/path must be given")
1732
1733 if fd is None:
1734 st = os.stat(path)
1735 else:
1736 st = os.fstat(fd)
1737
1738 return (st.st_dev, st.st_ino, st.st_mtime)
1739
1740
1741 def VerifyFileID(fi_disk, fi_ours):
1742 """Verifies that two file IDs are matching.
1743
1744 Differences in the inode/device are not accepted, but an older
1745 timestamp for fi_disk is accepted.
1746
1747 @param fi_disk: tuple (dev, inode, mtime) representing the actual
1748 file data
1749 @param fi_ours: tuple (dev, inode, mtime) representing the last
1750 written file data
1751 @rtype: boolean
1752
1753 """
1754 (d1, i1, m1) = fi_disk
1755 (d2, i2, m2) = fi_ours
1756
1757 return (d1, i1) == (d2, i2) and m1 <= m2
1758
1759
1760 def SafeWriteFile(file_name, file_id, **kwargs):
1761 """Wraper over L{WriteFile} that locks the target file.
1762
1763 By keeping the target file locked during WriteFile, we ensure that
1764 cooperating writers will safely serialise access to the file.
1765
1766 @type file_name: str
1767 @param file_name: the target filename
1768 @type file_id: tuple
1769 @param file_id: a result from L{GetFileID}
1770
1771 """
1772 fd = os.open(file_name, os.O_RDONLY | os.O_CREAT)
1773 try:
1774 LockFile(fd)
1775 if file_id is not None:
1776 disk_id = GetFileID(fd=fd)
1777 if not VerifyFileID(disk_id, file_id):
1778 raise errors.LockError("Cannot overwrite file %s, it has been modified"
1779 " since last written" % file_name)
1780 return WriteFile(file_name, **kwargs)
1781 finally:
1782 os.close(fd)
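
# Usage sketch (not part of the original module): the path and data below are
# made-up examples; the typical pattern is to remember the file ID from the
# last read/write and pass it back in, so that concurrent modifications are
# detected instead of silently overwritten.
def _ExampleSafeWriteFileUsage(last_file_id):
  """Illustrative sketch only."""
  return SafeWriteFile("/var/lib/example/example.data", last_file_id,
                       data="new contents\n")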
1783
1784
1785 def ReadOneLineFile(file_name, strict=False):
1786 """Return the first non-empty line from a file.
1787
1788 @type strict: boolean
1789 @param strict: if True, abort if the file has more than one
1790 non-empty line
1791
1792 """
1793 file_lines = ReadFile(file_name).splitlines()
1794 full_lines = filter(bool, file_lines)
1795 if not file_lines or not full_lines:
1796 raise errors.GenericError("No data in one-liner file %s" % file_name)
1797 elif strict and len(full_lines) > 1:
1798 raise errors.GenericError("Too many lines in one-liner file %s" %
1799 file_name)
1800 return full_lines[0]
1801
1802
1803 def FirstFree(seq, base=0):
1804 """Returns the first non-existing integer from seq.
1805
1806 The seq argument should be a sorted list of positive integers. The
1807 first time the index of an element is smaller than the element
1808 value, the index will be returned.
1809
1810 The base argument is used to start at a different offset,
1811 i.e. C{[3, 4, 6]} with I{base=3} will return 5.
1812
1813 Example: C{[0, 1, 3]} will return I{2}.
1814
1815 @type seq: sequence
1816 @param seq: the sequence to be analyzed.
1817 @type base: int
1818 @param base: use this value as the base index of the sequence
1819 @rtype: int
1820 @return: the first non-used index in the sequence
1821
1822 """
1823 for idx, elem in enumerate(seq):
1824 assert elem >= base, "Passed element is higher than base offset"
1825 if elem > idx + base:
1826 # idx is not used
1827 return idx + base
1828 return None
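
# Usage sketch (not part of the original module): the docstring examples
# above, spelled out as code.
def _ExampleFirstFreeUsage():
  """Illustrative sketch only."""
  assert FirstFree([0, 1, 3]) == 2
  assert FirstFree([3, 4, 6], base=3) == 5
  assert FirstFree([0, 1, 2]) is None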
1829
1830
1831 def SingleWaitForFdCondition(fdobj, event, timeout):
1832 """Waits for a condition to occur on the socket.
1833
1834 Immediately returns at the first interruption.
1835
1836 @type fdobj: integer or object supporting a fileno() method
1837 @param fdobj: entity to wait for events on
1838 @type event: integer
1839 @param event: ORed condition (see select module)
1840 @type timeout: float or None
1841 @param timeout: Timeout in seconds
1842 @rtype: int or None
1843 @return: None for timeout, otherwise the conditions that occurred
1844
1845 """
1846 check = (event | select.POLLPRI |
1847 select.POLLNVAL | select.POLLHUP | select.POLLERR)
1848
1849 if timeout is not None:
1850 # Poller object expects milliseconds
1851 timeout *= 1000
1852
1853 poller = select.poll()
1854 poller.register(fdobj, event)
1855 try:
1856 # TODO: If the main thread receives a signal and we have no timeout, we
1857 # could wait forever. This should check a global "quit" flag or something
1858 # every so often.
1859 io_events = poller.poll(timeout)
1860 except select.error, err:
1861 if err[0] != errno.EINTR:
1862 raise
1863 io_events = []
1864 if io_events and io_events[0][1] & check:
1865 return io_events[0][1]
1866 else:
1867 return None
1868
1869
1870 class FdConditionWaiterHelper(object):
1871 """Retry helper for WaitForFdCondition.
1872
1873 This class contains the retried and wait functions that make sure
1874 WaitForFdCondition can continue waiting until the timeout is actually
1875 expired.
1876
1877 """
1878
1879 def __init__(self, timeout):
1880 self.timeout = timeout
1881
1882 def Poll(self, fdobj, event):
1883 result = SingleWaitForFdCondition(fdobj, event, self.timeout)
1884 if result is None:
1885 raise RetryAgain()
1886 else:
1887 return result
1888
1889 def UpdateTimeout(self, timeout):
1890 self.timeout = timeout
1891
1892
1893 def WaitForFdCondition(fdobj, event, timeout):
1894 """Waits for a condition to occur on the socket.
1895
1896 Retries until the timeout is expired, even if interrupted.
1897
1898 @type fdobj: integer or object supporting a fileno() method
1899 @param fdobj: entity to wait for events on
1900 @type event: integer
1901 @param event: ORed condition (see select module)
1902 @type timeout: float or None
1903 @param timeout: Timeout in seconds
1904 @rtype: int or None
1905 @return: None for timeout, otherwise the conditions that occurred
1906
1907 """
1908 if timeout is not None:
1909 retrywaiter = FdConditionWaiterHelper(timeout)
1910 try:
1911 result = Retry(retrywaiter.Poll, RETRY_REMAINING_TIME, timeout,
1912 args=(fdobj, event), wait_fn=retrywaiter.UpdateTimeout)
1913 except RetryTimeout:
1914 result = None
1915 else:
1916 result = None
1917 while result is None:
1918 result = SingleWaitForFdCondition(fdobj, event, timeout)
1919 return result
1920
1921
1922 def TestDelay(duration):
1923 """Sleep for a fixed amount of time.
1924
1925 @type duration: float
1926 @param duration: the sleep duration
1927 @rtype: tuple
1928 @return: (False, error message) for a negative duration, (True, None) otherwise
1929
1930 """
1931 if duration < 0:
1932 return False, "Invalid sleep duration"
1933 time.sleep(duration)
1934 return True, None
1935
1936
1937 def _CloseFDNoErr(fd, retries=5):
1938 """Close a file descriptor ignoring errors.
1939
1940 @type fd: int
1941 @param fd: the file descriptor
1942 @type retries: int
1943 @param retries: how many times to retry, in case we get an
1944 error other than EBADF
1945
1946 """
1947 try:
1948 os.close(fd)
1949 except OSError, err:
1950 if err.errno != errno.EBADF:
1951 if retries > 0:
1952 _CloseFDNoErr(fd, retries - 1)
1953 # else either it's closed already or we're out of retries, so we
1954 # ignore this and go on
1955
1956
1957 def CloseFDs(noclose_fds=None):
1958 """Close file descriptors.
1959
1960 This closes all file descriptors above 2 (i.e. except
1961 stdin/out/err).
1962
1963 @type noclose_fds: list or None
1964 @param noclose_fds: if given, it denotes a list of file descriptor
1965 that should not be closed
1966
1967 """
1968 # Default maximum for the number of available file descriptors.
1969 if 'SC_OPEN_MAX' in os.sysconf_names:
1970 try:
1971 MAXFD = os.sysconf('SC_OPEN_MAX')
1972 if MAXFD < 0:
1973 MAXFD = 1024
1974 except OSError:
1975 MAXFD = 1024
1976 else:
1977 MAXFD = 1024
1978 maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
1979 if (maxfd == resource.RLIM_INFINITY):
1980 maxfd = MAXFD
1981
1982 # Iterate through and close all file descriptors (except the standard ones)
1983 for fd in range(3, maxfd):
1984 if noclose_fds and fd in noclose_fds:
1985 continue
1986 _CloseFDNoErr(fd)
1987
1988
1989 def Daemonize(logfile):
1990 """Daemonize the current process.
1991
1992 This detaches the current process from the controlling terminal and
1993 runs it in the background as a daemon.
1994
1995 @type logfile: str
1996 @param logfile: the logfile to which we should redirect stdout/stderr
1997 @rtype: int
1998 @return: the file descriptor of the write end of the error-reporting pipe
1999
2000 """
2001 # pylint: disable-msg=W0212
2002 # yes, we really want os._exit
2003
2004 # TODO: do another attempt to merge Daemonize and StartDaemon, or at
2005 # least abstract the pipe functionality between them
2006
2007 # Create pipe for sending error messages
2008 (rpipe, wpipe) = os.pipe()
2009
2010 # this might fail
2011 pid = os.fork()
2012 if (pid == 0): # The first child.
2013 SetupDaemonEnv()
2014
2015 # this might fail
2016 pid = os.fork() # Fork a second child.
2017 if (pid == 0): # The second child.
2018 _CloseFDNoErr(rpipe)
2019 else:
2020 # exit() or _exit()? See below.
2021 os._exit(0) # Exit parent (the first child) of the second child.
2022 else:
2023 _CloseFDNoErr(wpipe)
2024 # Wait for daemon to be started (or an error message to
2025 # arrive) and read up to 100 KB as an error message
2026 errormsg = RetryOnSignal(os.read, rpipe, 100 * 1024)
2027 if errormsg:
2028 sys.stderr.write("Error when starting daemon process: %r\n" % errormsg)
2029 rcode = 1
2030 else:
2031 rcode = 0
2032 os._exit(rcode) # Exit parent of the first child.
2033
2034 SetupDaemonFDs(logfile, None)
2035 return wpipe
2036
2037
2038 def DaemonPidFileName(name):
2039 """Compute a ganeti pid file absolute path
2040
2041 @type name: str
2042 @param name: the daemon name
2043 @rtype: str
2044 @return: the full path to the pidfile corresponding to the given
2045 daemon name
2046
2047 """
2048 return PathJoin(constants.RUN_GANETI_DIR, "%s.pid" % name)
2049
2050
2051 def EnsureDaemon(name):
2052 """Check for and start daemon if not alive.
2053
2054 """
2055 result = RunCmd([constants.DAEMON_UTIL, "check-and-start", name])
2056 if result.failed:
2057 logging.error("Can't start daemon '%s', failure %s, output: %s",
2058 name, result.fail_reason, result.output)
2059 return False
2060
2061 return True
2062
2063
2064 def StopDaemon(name):
2065 """Stop daemon
2066
2067 """
2068 result = RunCmd([constants.DAEMON_UTIL, "stop", name])
2069 if result.failed:
2070 logging.error("Can't stop daemon '%s', failure %s, output: %s",
2071 name, result.fail_reason, result.output)
2072 return False
2073
2074 return True
2075
2076
2077 def WritePidFile(pidfile):
2078 """Write the current process pidfile.
2079
2080 @type pidfile: string
2081 @param pidfile: the path to the file to be written
2082 @raise errors.LockError: if the pid file already exists and
2083 points to a live process
2084 @rtype: int
2085 @return: the file descriptor of the lock file; do not close this unless
2086 you want to unlock the pid file
2087
2088 """
2089 # We neither rename nor truncate the file, so as not to drop locks held
2090 # by existing processes
2091 fd_pidfile = os.open(pidfile, os.O_WRONLY | os.O_CREAT, 0600)
2092
2093 # Lock the PID file (and fail if not possible to do so). Any code
2094 # wanting to send a signal to the daemon should try to lock the PID
2095 # file before reading it. If acquiring the lock succeeds, the daemon is
2096 # no longer running and the signal should not be sent.
2097 LockFile(fd_pidfile)
2098
2099 os.write(fd_pidfile, "%d\n" % os.getpid())
2100
2101 return fd_pidfile
2102
2103
2104 def RemovePidFile(name):
2105 """Remove the current process pidfile.
2106
2107 Any errors are ignored.
2108
2109 @type name: str
2110 @param name: the daemon name used to derive the pidfile name
2111
2112 """
2113 pidfilename = DaemonPidFileName(name)
2114 # TODO: we could check here that the file contains our pid
2115 try:
2116 RemoveFile(pidfilename)
2117 except: # pylint: disable-msg=W0702
2118 pass
2119
2120
2121 def KillProcess(pid, signal_=signal.SIGTERM, timeout=30,
2122 waitpid=False):
2123 """Kill a process given by its pid.
2124
2125 @type pid: int
2126 @param pid: The PID to terminate.
2127 @type signal_: int
2128 @param signal_: The signal to send, by default SIGTERM
2129 @type timeout: int
2130 @param timeout: The timeout after which, if the process is still alive,
2131 a SIGKILL will be sent. If not positive, no such checking
2132 will be done
2133 @type waitpid: boolean
2134 @param waitpid: If true, we should waitpid on this process after
2135 sending signals, since it's our own child and otherwise it
2136 would remain as zombie
2137
2138 """
2139 def _helper(pid, signal_, wait):
2140 """Simple helper to encapsulate the kill/waitpid sequence"""
2141 if IgnoreProcessNotFound(os.kill, pid, signal_) and wait:
2142 try:
2143 os.waitpid(pid, os.WNOHANG)
2144 except OSError:
2145 pass
2146
2147 if pid <= 0:
2148 # kill with pid=0 == suicide
2149 raise errors.ProgrammerError("Invalid pid given '%s'" % pid)
2150
2151 if not IsProcessAlive(pid):
2152 return
2153
2154 _helper(pid, signal_, waitpid)
2155
2156 if timeout <= 0:
2157 return
2158
2159 def _CheckProcess():
2160 if not IsProcessAlive(pid):
2161 return
2162
2163 try:
2164 (result_pid, _) = os.waitpid(pid, os.WNOHANG)
2165 except OSError:
2166 raise RetryAgain()
2167
2168 if result_pid > 0:
2169 return
2170
2171 raise RetryAgain()
2172
2173 try:
2174 # Wait up to $timeout seconds
2175 Retry(_CheckProcess, (0.01, 1.5, 0.1), timeout)
2176 except RetryTimeout:
2177 pass
2178
2179 if IsProcessAlive(pid):
2180 # Kill process if it's still alive
2181 _helper(pid, signal.SIGKILL, waitpid)
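
# Illustrative sketch (not part of the original code): terminating a child
# process we forked ourselves, hence waitpid=True so it gets reaped. The
# child_pid value comes from a hypothetical earlier os.fork().
#
#   KillProcess(child_pid, signal_=signal.SIGTERM, timeout=10, waitpid=True)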
2182
2183
2184 def FindFile(name, search_path, test=os.path.exists):
2185 """Look for a filesystem object in a given path.
2186
2187 This is a generic function to search for a filesystem object (a file
2188 or a directory) under a given search path.
2189
2190 @type name: str
2191 @param name: the name to look for
2192 @type search_path: list of str
2193 @param search_path: the directories in which to search
2194 @type test: callable
2195 @param test: a function taking one argument that should return True
2196 if a given object is valid; the default value is
2197 os.path.exists, causing only existing files to be returned
2198 @rtype: str or None
2199 @return: full path to the object if found, None otherwise
2200
2201 """
2202 # validate the filename mask
2203 if constants.EXT_PLUGIN_MASK.match(name) is None:
2204 logging.critical("Invalid value passed for external script name: '%s'",
2205 name)
2206 return None
2207
2208 for dir_name in search_path:
2209 # FIXME: investigate switch to PathJoin
2210 item_name = os.path.sep.join([dir_name, name])
2211 # check the user test and that we're indeed resolving to the given
2212 # basename
2213 if test(item_name) and os.path.basename(item_name) == name:
2214 return item_name
2215 return None
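
# Illustrative sketch (not part of the original code): looking up an OS
# definition directory by name in a hypothetical search path; each directory
# in the list is tried in order and only the basename is passed as the name.
#
#   path = FindFile("debootstrap", ["/srv/ganeti/os", "/usr/share/ganeti/os"],
#                   test=os.path.isdir)
#   if path is None:
#     pass  # not found in any of the directories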
2216
2217
2218 def CheckVolumeGroupSize(vglist, vgname, minsize):
2219 """Checks if the volume group list is valid.
2220
2221 The function will check if a given volume group is in the list of
2222 volume groups and has a minimum size.
2223
2224 @type vglist: dict
2225 @param vglist: dictionary of volume group names and their size
2226 @type vgname: str
2227 @param vgname: the volume group we should check
2228 @type minsize: int
2229 @param minsize: the minimum size we accept
2230 @rtype: None or str
2231 @return: None for success, otherwise the error message
2232
2233 """
2234 vgsize = vglist.get(vgname, None)
2235 if vgsize is None:
2236 return "volume group '%s' missing" % vgname
2237 elif vgsize < minsize:
2238 return ("volume group '%s' too small (%s MiB required, %d MiB found)" %
2239 (vgname, minsize, vgsize))
2240 return None
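
# Illustrative sketch (not part of the original code); the vglist mapping
# (volume group name -> size in MiB) is made up. A None result means the
# check passed, otherwise the returned string describes the problem.
#
#   err = CheckVolumeGroupSize({"xenvg": 10240}, "xenvg", 20480)
#   # err == "volume group 'xenvg' too small (20480 MiB required, 10240 MiB found)"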
2241
2242
2243 def SplitTime(value):
2244 """Splits time as floating point number into a tuple.
2245
2246 @param value: Time in seconds
2247 @type value: int or float
2248 @return: Tuple containing (seconds, microseconds)
2249
2250 """
2251 (seconds, microseconds) = divmod(int(value * 1000000), 1000000)
2252
2253 assert 0 <= seconds, \
2254 "Seconds must be larger than or equal to 0, but are %s" % seconds
2255 assert 0 <= microseconds <= 999999, \
2256 "Microseconds must be 0-999999, but are %s" % microseconds
2257
2258 return (int(seconds), int(microseconds))
2259
2260
2261 def MergeTime(timetuple):
2262 """Merges a tuple into time as a floating point number.
2263
2264 @param timetuple: Time as tuple, (seconds, microseconds)
2265 @type timetuple: tuple
2266 @return: Time as a floating point number expressed in seconds
2267
2268 """
2269 (seconds, microseconds) = timetuple
2270
2271 assert 0 <= seconds, \
2272 "Seconds must be larger than or equal to 0, but are %s" % seconds
2273 assert 0 <= microseconds <= 999999, \
2274 "Microseconds must be 0-999999, but are %s" % microseconds
2275
2276 return float(seconds) + (float(microseconds) * 0.000001)
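
# Illustrative examples (not part of the original code) showing the
# SplitTime/MergeTime round-trip:
#
#   >>> SplitTime(1.5)
#   (1, 500000)
#   >>> MergeTime((1, 500000))
#   1.5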
2277
2278
2279 def IsNormAbsPath(path):
2280 """Check whether a path is absolute and also normalized
2281
2282 This prevents paths like /dir/../../other/path from being considered valid.
2283
2284 """
2285 return os.path.normpath(path) == path and os.path.isabs(path)
2286
2287
2288 def PathJoin(*args):
2289 """Safe-join a list of path components.
2290
2291 Requirements:
2292 - the first argument must be an absolute path
2293 - no component in the path must have backtracking (e.g. /../),
2294 since we check for normalization at the end
2295
2296 @param args: the path components to be joined
2297 @raise ValueError: for invalid paths
2298
2299 """
2300 # ensure that at least one path component was passed in
2301 assert args
2302 # ensure the first component is an absolute and normalized path name
2303 root = args[0]
2304 if not IsNormAbsPath(root):
2305 raise ValueError("Invalid parameter to PathJoin: '%s'" % str(args[0]))
2306 result = os.path.join(*args)
2307 # ensure that the whole path is normalized
2308 if not IsNormAbsPath(result):
2309 raise ValueError("Invalid parameters to PathJoin: '%s'" % str(args))
2310 # check that we're still under the original prefix
2311 prefix = os.path.commonprefix([root, result])
2312 if prefix != root:
2313 raise ValueError("Error: path joining resulted in different prefix"
2314 " (%s != %s)" % (prefix, root))
2315 return result
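
# Illustrative examples (not part of the original code): PathJoin only
# accepts normalized results staying under the original prefix, so the
# second call raises ValueError.
#
#   PathJoin("/var/run/ganeti", "masterd.pid")  # -> "/var/run/ganeti/masterd.pid"
#   PathJoin("/var/run/ganeti", "../shadow")    # raises ValueError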
2316
2317
2318 def TailFile(fname, lines=20):
2319 """Return the last lines from a file.
2320
2321 @note: this function will only read and parse the last 4KB of
2322 the file; if the lines are very long, it could be that fewer
2323 than the requested number of lines are returned
2324
2325 @param fname: the file name
2326 @type lines: int
2327 @param lines: the (maximum) number of lines to return
2328
2329 """
2330 fd = open(fname, "r")
2331 try:
2332 fd.seek(0, 2)
2333 pos = fd.tell()
2334 pos = max(0, pos-4096)
2335 fd.seek(pos, 0)
2336 raw_data = fd.read()
2337 finally:
2338 fd.close()
2339
2340 rows = raw_data.splitlines()
2341 return rows[-lines:]
2342
2343
2344 def _ParseAsn1Generalizedtime(value):
2345 """Parses an ASN1 GENERALIZEDTIME timestamp as used by pyOpenSSL.
2346
2347 @type value: string
2348 @param value: ASN1 GENERALIZEDTIME timestamp
2349 @return: Seconds since the Epoch (1970-01-01 00:00:00 UTC)
2350
2351 """
2352 m = _ASN1_TIME_REGEX.match(value)
2353 if m:
2354 # We have an offset
2355 asn1time = m.group(1)
2356 hours = int(m.group(2))
2357 minutes = int(m.group(3))
2358 utcoffset = (60 * hours) + minutes
2359 else:
2360 if not value.endswith("Z"):
2361 raise ValueError("Missing timezone")
2362 asn1time = value[:-1]
2363 utcoffset = 0
2364
2365 parsed = time.strptime(asn1time, "%Y%m%d%H%M%S")
2366
2367 tt = datetime.datetime(*(parsed[:7])) - datetime.timedelta(minutes=utcoffset)
2368
2369 return calendar.timegm(tt.utctimetuple())
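
# Illustrative examples (not part of the original code): both timestamps
# below denote 2000-01-01 00:00:00 UTC, once with the "Z" suffix and once
# with an explicit +0100 offset.
#
#   >>> _ParseAsn1Generalizedtime("20000101000000Z")
#   946684800
#   >>> _ParseAsn1Generalizedtime("20000101010000+0100")
#   946684800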
2370
2371
2372 def GetX509CertValidity(cert):
2373 """Returns the validity period of the certificate.
2374
2375 @type cert: OpenSSL.crypto.X509
2376 @param cert: X509 certificate object
2377
2378 """
2379 # The get_notBefore and get_notAfter functions are only supported in
2380 # pyOpenSSL 0.7 and above.
2381 try:
2382 get_notbefore_fn = cert.get_notBefore
2383 except AttributeError:
2384 not_before = None
2385 else:
2386 not_before_asn1 = get_notbefore_fn()
2387
2388 if not_before_asn1 is None:
2389 not_before = None
2390 else:
2391 not_before = _ParseAsn1Generalizedtime(not_before_asn1)
2392
2393 try:
2394 get_notafter_fn = cert.get_notAfter
2395 except AttributeError:
2396 not_after = None
2397 else:
2398 not_after_asn1 = get_notafter_fn()
2399
2400 if not_after_asn1 is None:
2401 not_after = None
2402 else:
2403 not_after = _ParseAsn1Generalizedtime(not_after_asn1)
2404
2405 return (not_before, not_after)
2406
2407
2408 def _VerifyCertificateInner(expired, not_before, not_after, now,
2409 warn_days, error_days):
2410 """Verifies certificate validity.
2411
2412 @type expired: bool
2413 @param expired: Whether pyOpenSSL considers the certificate as expired
2414 @type not_before: number or None
2415 @param not_before: Unix timestamp before which certificate is not valid
2416 @type not_after: number or None
2417 @param not_after: Unix timestamp after which certificate is invalid
2418 @type now: number
2419 @param now: Current time as Unix timestamp
2420 @type warn_days: number or None
2421 @param warn_days: How many days before expiration a warning should be reported
2422 @type error_days: number or None
2423 @param error_days: How many days before expiration an error should be reported
2424
2425 """
2426 if expired:
2427 msg = "Certificate is expired"
2428
2429 if not_before is not None and not_after is not None:
2430 msg += (" (valid from %s to %s)" %
2431 (FormatTime(not_before), FormatTime(not_after)))
2432 elif not_before is not None:
2433 msg += " (valid from %s)" % FormatTime(not_before)
2434 elif not_after is not None:
2435 msg += " (valid until %s)" % FormatTime(not_after)
2436
2437 return (CERT_ERROR, msg)
2438
2439 elif not_before is not None and not_before > now:
2440 return (CERT_WARNING,
2441 "Certificate not yet valid (valid from %s)" %
2442 FormatTime(not_before))
2443
2444 elif not_after is not None:
2445 remaining_days = int((not_after - now) / (24 * 3600))
2446
2447 msg = "Certificate expires in about %d days" % remaining_days
2448
2449 if error_days is not None and remaining_days <= error_days:
2450 return (CERT_ERROR, msg)
2451
2452 if warn_days is not None and remaining_days <= warn_days:
2453 return (CERT_WARNING, msg)
2454
2455 return (None, None)
2456
2457
2458 def VerifyX509Certificate(cert, warn_days, error_days):
2459 """Verifies a certificate for LUVerifyCluster.
2460
2461 @type cert: OpenSSL.crypto.X509
2462 @param cert: X509 certificate object
2463 @type warn_days: number or None
2464 @param warn_days: How many days before expiration a warning should be reported
2465 @type error_days: number or None
2466 @param error_days: How many days before expiration an error should be reported
2467
2468 """
2469 # Depending on the pyOpenSSL version, this can just return (None, None)
2470 (not_before, not_after) = GetX509CertValidity(cert)
2471
2472 return _VerifyCertificateInner(cert.has_expired(), not_before, not_after,
2473 time.time(), warn_days, error_days)
2474
2475
2476 def SignX509Certificate(cert, key, salt):
2477 """Sign a X509 certificate.
2478
2479 An RFC822-like signature header is added in front of the certificate.
2480
2481 @type cert: OpenSSL.crypto.X509
2482 @param cert: X509 certificate object
2483 @type key: string
2484 @param key: Key for HMAC
2485 @type salt: string
2486 @param salt: Salt for HMAC
2487 @rtype: string
2488 @return: Serialized and signed certificate in PEM format
2489
2490 """
2491 if not VALID_X509_SIGNATURE_SALT.match(salt):
2492 raise errors.GenericError("Invalid salt: %r" % salt)
2493
2494 # Dumping as PEM here ensures the certificate is in a sane format
2495 cert_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)
2496
2497 return ("%s: %s/%s\n\n%s" %
2498 (constants.X509_CERT_SIGNATURE_HEADER, salt,
2499 Sha1Hmac(key, cert_pem, salt=salt),
2500 cert_pem))
2501
2502
2503 def _ExtractX509CertificateSignature(cert_pem):
2504 """Helper function to extract signature from X509 certificate.
2505
2506 """
2507 # Extract signature from original PEM data
2508 for line in cert_pem.splitlines():
2509 if line.startswith("---"):
2510 break
2511
2512 m = X509_SIGNATURE.match(line.strip())
2513 if m:
2514 return (m.group("salt"), m.group("sign"))
2515
2516 raise errors.GenericError("X509 certificate signature is missing")
2517
2518
2519 def LoadSignedX509Certificate(cert_pem, key):
2520 """Verifies a signed X509 certificate.
2521
2522 @type cert_pem: string
2523 @param cert_pem: Certificate in PEM format and with signature header
2524 @type key: string
2525 @param key: Key for HMAC
2526 @rtype: tuple; (OpenSSL.crypto.X509, string)
2527 @return: X509 certificate object and salt
2528
2529 """
2530 (salt, signature) = _ExtractX509CertificateSignature(cert_pem)
2531
2532 # Load certificate
2533 cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert_pem)
2534
2535 # Dump again to ensure it's in a sane format
2536 sane_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)
2537
2538 if not VerifySha1Hmac(key, sane_pem, signature, salt=salt):
2539 raise errors.GenericError("X509 certificate signature is invalid")
2540
2541 return (cert, salt)
2542
2543
2544 def Sha1Hmac(key, text, salt=None):
2545 """Calculates the HMAC-SHA1 digest of a text.
2546
2547 HMAC is defined in RFC2104.
2548
2549 @type key: string
2550 @param key: Secret key
2551 @type text: string
2552
2553 """
2554 if salt:
2555 salted_text = salt + text
2556 else:
2557 salted_text = text
2558
2559 return hmac.new(key, salted_text, compat.sha1).hexdigest()
2560
2561
2562 def VerifySha1Hmac(key, text, digest, salt=None):
2563 """Verifies the HMAC-SHA1 digest of a text.
2564
2565 HMAC is defined in RFC2104.
2566
2567 @type key: string
2568 @param key: Secret key
2569 @type text: string
2570 @type digest: string
2571 @param digest: Expected digest
2572 @rtype: bool
2573 @return: Whether HMAC-SHA1 digest matches
2574
2575 """
2576 return digest.lower() == Sha1Hmac(key, text, salt=salt).lower()
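
# Illustrative round-trip sketch (not part of the original code); key, text
# and salt are arbitrary values. The same salt must be used for signing and
# verification, as it is prepended to the text before hashing.
#
#   digest = Sha1Hmac("secret-key", "some payload", salt="abc123")
#   assert VerifySha1Hmac("secret-key", "some payload", digest, salt="abc123")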
2577
2578
2579 def FindMatch(data, name):
2580 """Tries to find an item in a dictionary matching a name.
2581
2582 Callers have to ensure the data names aren't contradictory (e.g. a regexp
2583 that matches a string). If the name isn't a direct key, all regular
2584 expression objects in the dictionary are matched against it.
2585
2586 @type data: dict
2587 @param data: Dictionary containing data
2588 @type name: string
2589 @param name: Name to look for
2590 @rtype: tuple; (value in dictionary, matched groups as list) or None if no match was found
2591
2592 """
2593 if name in data:
2594 return (data[name], [])
2595
2596 for key, value in data.items():
2597 # Regex objects
2598 if hasattr(key, "match"):
2599 m = key.match(name)
2600 if m:
2601 return (value, list(m.groups()))
2602
2603 return None
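
# Illustrative sketch (not part of the original code): the dictionary mixes a
# plain string key with a compiled regular expression; keys and values are
# made up.
#
#   data = {
#     "nic.count": "count-handler",
#     re.compile(r"^nic\.mac/(\d+)$"): "mac-handler",
#     }
#   FindMatch(data, "nic.count")   # -> ("count-handler", [])
#   FindMatch(data, "nic.mac/2")   # -> ("mac-handler", ["2"])
#   FindMatch(data, "unknown")     # -> None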
2604
2605
2606 def BytesToMebibyte(value):
2607 """Converts bytes to mebibytes.
2608
2609 @type value: int
2610 @param value: Value in bytes
2611 @rtype: int
2612 @return: Value in mebibytes
2613
2614 """
2615 return int(round(value / (1024.0 * 1024.0), 0))
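
# Illustrative examples (not part of the original code): values are rounded
# to the nearest mebibyte.
#
#   >>> BytesToMebibyte(1024 * 1024)
#   1
#   >>> BytesToMebibyte(5 * 1024 * 1024 + 1)
#   5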
2616
2617
2618 def CalculateDirectorySize(path):
2619 """Calculates the size of a directory recursively.
2620
2621 @type path: string
2622 @param path: Path to directory
2623 @rtype: int
2624 @return: Size in mebibytes
2625
2626 """
2627 size = 0
2628
2629 for (curpath, _, files) in os.walk(path):
2630 for filename in files:
2631 st = os.lstat(PathJoin(curpath, filename))
2632 size += st.st_size
2633
2634 return BytesToMebibyte(size)
2635
2636
2637 def GetMounts(filename=constants.PROC_MOUNTS):
2638 """Returns the list of mounted filesystems.
2639
2640 This function is Linux-specific.
2641
2642 @param filename: path of mounts file (/proc/mounts by default)
2643 @rtype: list of tuples
2644 @return: list of mount entries (device, mountpoint, fstype, options)
2645
2646 """
2647 # TODO(iustin): investigate non-Linux options (e.g. via mount output)
2648 data = []
2649 mountlines = ReadFile(filename).splitlines()
2650 for line in mountlines:
2651 device, mountpoint, fstype, options, _ = line.split(None, 4)
2652 data.append((device, mountpoint, fstype, options))
2653
2654 return data
2655
2656
2657 def GetFilesystemStats(path):
2658 """Returns the total and free space on a filesystem.
2659
2660 @type path: string
2661 @param path: Path on filesystem to be examined
2662 @rtype: tuple
2663 @return: tuple of (total space, free space) in mebibytes
2664
2665 """
2666 st = os.statvfs(path)
2667
2668 fsize = BytesToMebibyte(st.f_bavail * st.f_frsize)
2669 tsize = BytesToMebibyte(st.f_blocks * st.f_frsize)
2670 return (tsize, fsize)
2671
2672
2673 def RunInSeparateProcess(fn, *args):
2674 """Runs a function in a separate process.
2675
2676 Note: Only boolean return values are supported.
2677
2678 @type fn: callable
2679 @param fn: Function to be called
2680 @rtype: bool
2681 @return: Function's result
2682
2683 """
2684 pid = os.fork()
2685 if pid == 0:
2686 # Child process
2687 try:
2688 # In case the function uses temporary files
2689 ResetTempfileModule()
2690
2691 # Call function
2692 result = int(bool(fn(*args)))
2693 assert result in (0, 1)
2694 except: # pylint: disable-msg=W0702
2695 logging.exception("Error while calling function in separate process")
2696 # 0 and 1 are reserved for the return value
2697 result = 33
2698
2699 os._exit(result) # pylint: disable-msg=W0212
2700
2701 # Parent process
2702
2703 # Avoid zombies and check exit code
2704 (_, status) = os.waitpid(pid, 0)
2705
2706 if os.WIFSIGNALED(status):
2707 exitcode = None
2708 signum = os.WTERMSIG(status)
2709 else:
2710 exitcode = os.WEXITSTATUS(status)
2711 signum = None
2712
2713 if not (exitcode in (0, 1) and signum is None):
2714 raise errors.GenericError("Child program failed (code=%s, signal=%s)" %
2715 (exitcode, signum))
2716
2717 return bool(exitcode)
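
# Illustrative sketch (not part of the original code): the callable runs in a
# forked child and only its boolean result is propagated back to the parent.
#
#   RunInSeparateProcess(os.path.isdir, "/tmp")  # -> True on most systems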
2718
2719
2720 def IgnoreProcessNotFound(fn, *args, **kwargs):
2721 """Ignores ESRCH when calling a process-related function.
2722
2723 ESRCH is raised when a process is not found.
2724
2725 @rtype: bool
2726 @return: Whether process was found
2727
2728 """
2729 try:
2730 fn(*args, **kwargs)
2731 except EnvironmentError, err:
2732 # Ignore ESRCH
2733 if err.errno == errno.ESRCH:
2734 return False
2735 raise
2736
2737 return True
2738
2739
2740 def IgnoreSignals(fn, *args, **kwargs):
2741 """Tries to call a function ignoring failures due to EINTR.
2742
2743 """
2744 try:
2745 return fn(*args, **kwargs)
2746 except EnvironmentError, err:
2747 if err.errno == errno.EINTR:
2748 return None
2749 else:
2750 raise
2751 except (select.error, socket.error), err:
2752 # In Python 2.6 and above select.error is an IOError, so it's handled
2753 # above; in 2.5 and below it's not, so it's handled here.
2754 if err.args and err.args[0] == errno.EINTR:
2755 return None
2756 else:
2757 raise
2758
2759
2760 def LockFile(fd):
2761 """Locks a file using POSIX locks.
2762
2763 @type fd: int
2764 @param fd: the file descriptor we need to lock
2765
2766 """
2767 try:
2768 fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
2769 except IOError, err:
2770 if err.errno == errno.EAGAIN:
2771 raise errors.LockError("File already locked")
2772 raise
2773
2774
2775 def ReadWatcherPauseFile(filename, now=None, remove_after=3600):
2776 """Reads the watcher pause file.
2777
2778 @type filename: string
2779 @param filename: Path to watcher pause file
2780 @type now: None, float or int
2781 @param now: Current time as Unix timestamp
2782 @type remove_after: int
2783 @param remove_after: Remove watcher pause file after specified amount of
2784 seconds past the pause end time
2785
2786 """
2787 if now is None:
2788 now = time.time()
2789
2790 try:
2791 value = ReadFile(filename)
2792 except IOError, err:
2793 if err.errno != errno.ENOENT:
2794 raise
2795 value = None
2796
2797 if value is not None:
2798 try:
2799 value = int(value)
2800 except ValueError:
2801 logging.warning(("Watcher pause file (%s) contains invalid value,"
2802 " removing it"), filename)
2803 RemoveFile(filename)
2804 value = None
2805
2806 if value is not None:
2807 # Remove file if it's outdated
2808 if now > (value + remove_after):
2809 RemoveFile(filename)
2810 value = None
2811
2812 elif now > value:
2813 value = None
2814
2815 return value
2816
2817
2818 def GetClosedTempfile(*args, **kwargs):
2819 """Creates a temporary file and returns its path.
2820
2821 """
2822 (fd, path) = tempfile.mkstemp(*args, **kwargs)
2823 _CloseFDNoErr(fd)
2824 return path
2825
2826
2827 def GenerateSelfSignedX509Cert(common_name, validity):
2828 """Generates a self-signed X509 certificate.
2829
2830 @type common_name: string
2831 @param common_name: commonName value
2832 @type validity: int
2833 @param validity: Validity for certificate in seconds
2834
2835 """
2836 # Create private and public key
2837 key = OpenSSL.crypto.PKey()
2838 key.generate_key(OpenSSL.crypto.TYPE_RSA, constants.RSA_KEY_BITS)
2839
2840 # Create self-signed certificate
2841 cert = OpenSSL.crypto.X509()
2842 if common_name:
2843 cert.get_subject().CN = common_name
2844 cert.set_serial_number(1)
2845 cert.gmtime_adj_notBefore(0)
2846 cert.gmtime_adj_notAfter(validity)
2847 cert.set_issuer(cert.get_subject())
2848 cert.set_pubkey(key)
2849 cert.sign(key, constants.X509_CERT_SIGN_DIGEST)
2850
2851 key_pem = OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, key)
2852 cert_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)
2853
2854 return (key_pem, cert_pem)
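
# Illustrative sketch (not part of the original code) tying the X509 helpers
# together: generate a certificate, sign its PEM form with an HMAC key and
# load/verify it again. The common name, validity, key and salt are made up.
#
#   (_, cert_pem) = GenerateSelfSignedX509Cert("example.ganeti.org", 3600)
#   cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
#                                          cert_pem)
#   signed_pem = SignX509Certificate(cert, "hmac-key", "abc123")
#   (loaded_cert, salt) = LoadSignedX509Certificate(signed_pem, "hmac-key")
#   assert salt == "abc123"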
2855
2856
2857 def GenerateSelfSignedSslCert(filename, common_name=constants.X509_CERT_CN,
2858 validity=constants.X509_CERT_DEFAULT_VALIDITY):
2859 """Legacy function to generate self-signed X509 certificate.
2860
2861 @type filename: str
2862 @param filename: path to write certificate to
2863 @type common_name: string
2864 @param common_name: commonName value
2865 @type validity: int
2866 @param validity: validity of certificate in number of days
2867
2868 """
2869 # TODO: Investigate using the cluster name instead of X509_CERT_CN for
2870 # common_name, as cluster-renames are very seldom, and it'd be nice if RAPI
2871 # and node daemon certificates have the proper Subject/Issuer.
2872 (key_pem, cert_pem) = GenerateSelfSignedX509Cert(common_name,
2873 validity * 24 * 60 * 60)
2874
2875 WriteFile(filename, mode=0400, data=key_pem + cert_pem)
2876
2877
2878 class FileLock(object):
2879 """Utility class for file locks.
2880
2881 """
2882 def __init__(self, fd, filename):
2883 """Constructor for FileLock.
2884
2885 @type fd: file
2886 @param fd: File object
2887 @type filename: str
2888 @param filename: Path of the file opened at I{fd}
2889
2890 """
2891 self.fd = fd
2892 self.filename = filename
2893
2894 @classmethod
2895 def Open(cls, filename):
2896 """Creates and opens a file to be used as a file-based lock.
2897
2898 @type filename: string
2899 @param filename: path to the file to be locked
2900
2901 """
2902 # Using "os.open" is necessary to allow both opening existing file
2903 # read/write and creating if not existing. Vanilla "open" will truncate an
2904 # existing file -or- allow creating if not existing.
2905 return cls(os.fdopen(os.open(filename, os.O_RDWR | os.O_CREAT), "w+"),
2906 filename)
2907
2908 def __del__(self):
2909 self.Close()
2910
2911 def Close(self):
2912 """Close the file and release the lock.
2913
2914 """
2915 if hasattr(self, "fd") and self.fd:
2916 self.fd.close()
2917 self.fd = None
2918
2919 def _flock(self, flag, blocking, timeout, errmsg):
2920 """Wrapper for fcntl.flock.
2921
2922 @type flag: int
2923 @param flag: operation flag
2924 @type blocking: bool
2925 @param blocking: whether the operation should be done in blocking mode.
2926 @type timeout: None or float
2927 @param timeout: for how long the operation should be retried (implies
2928 non-blocking mode).
2929 @type errmsg: string
2930 @param errmsg: error message in case operation fails.
2931
2932 """
2933 assert self.fd, "Lock was closed"
2934 assert timeout is None or timeout >= 0, \
2935 "If specified, timeout must be positive"
2936 assert not (flag & fcntl.LOCK_NB), "LOCK_NB must not be set"
2937
2938 # When a timeout is used, LOCK_NB must always be set
2939 if not (timeout is None and blocking):
2940 flag |= fcntl.LOCK_NB
2941
2942 if timeout is None:
2943 self._Lock(self.fd, flag, timeout)
2944 else:
2945 try:
2946 Retry(self._Lock, (0.1, 1.2, 1.0), timeout,
2947 args=(self.fd, flag, timeout))
2948 except RetryTimeout:
2949 raise errors.LockError(errmsg)
2950
2951 @staticmethod
2952 def _Lock(fd, flag, timeout):
2953 try:
2954 fcntl.flock(fd, flag)
2955 except IOError, err:
2956 if timeout is not None and err.errno == errno.EAGAIN:
2957 raise RetryAgain()
2958
2959 logging.exception("fcntl.flock failed")
2960 raise
2961
2962 def Exclusive(self, blocking=False, timeout=None):
2963 """Locks the file in exclusive mode.
2964
2965 @type blocking: boolean
2966 @param blocking: whether to block and wait until we
2967 can lock the file or return immediately
2968 @type timeout: int or None
2969 @param timeout: if not None, the duration to wait for the lock
2970 (in blocking mode)
2971
2972 """
2973 self._flock(fcntl.LOCK_EX, blocking, timeout,
2974 "Failed to lock %s in exclusive mode" % self.filename)
2975
2976 def Shared(self, blocking=False, timeout=None):
2977 """Locks the file in shared mode.
2978
2979 @type blocking: boolean
2980 @param blocking: whether to block and wait until we
2981 can lock the file or return immediately
2982 @type timeout: int or None
2983 @param timeout: if not None, the duration to wait for the lock
2984 (in blocking mode)
2985
2986 """
2987 self._flock(fcntl.LOCK_SH, blocking, timeout,
2988 "Failed to lock %s in shared mode" % self.filename)
2989
2990 def Unlock(self, blocking=True, timeout=None):
2991 """Unlocks the file.
2992
2993 According to C{flock(2)}, unlocking can also be a nonblocking
2994 operation::
2995
2996 To make a non-blocking request, include LOCK_NB with any of the above
2997 operations.
2998
2999 @type blocking: boolean
3000 @param blocking: whether to block and wait until we
3001 can unlock the file or return immediately
3002 @type timeout: int or None
3003 @param timeout: if not None, the duration to wait for the lock
3004 (in blocking mode)
3005
3006 """
3007 self._flock(fcntl.LOCK_UN, blocking, timeout,
3008 "Failed to unlock %s" % self.filename)
3009
3010
3011 def SignalHandled(signums):
3012 """Signal Handled decoration.
3013
3014 This special decorator installs a signal handler and then calls the target
3015 function. The function must accept a 'signal_handlers' keyword argument,
3016 which will contain a dict indexed by signal number, with SignalHandler
3017 objects as values.
3018
3019 The decorator can be safely stacked with itself, to handle multiple signals
3020 with different handlers.
3021
3022 @type signums: list
3023 @param signums: signals to intercept
3024
3025 """
3026 def wrap(fn):
3027 def sig_function(*args, **kwargs):
3028 assert 'signal_handlers' not in kwargs or \
3029 kwargs['signal_handlers'] is None or \
3030 isinstance(kwargs['signal_handlers'], dict), \
3031 "Wrong signal_handlers parameter in original function call"
3032 if 'signal_handlers' in kwargs and kwargs['signal_handlers'] is not None:
3033 signal_handlers = kwargs['signal_handlers']
3034 else:
3035 signal_handlers = {}
3036 kwargs['signal_handlers'] = signal_handlers
3037 sighandler = SignalHandler(signums)
3038 try:
3039 for sig in signums:
3040 signal_handlers[sig] = sighandler
3041 return fn(*args, **kwargs)
3042 finally:
3043 sighandler.Reset()
3044 return sig_function
3045 return wrap
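
# Illustrative sketch (not part of the original code) of the decorator; the
# decorated function receives the installed handlers via the
# 'signal_handlers' keyword argument and can poll their 'called' flags.
#
#   @SignalHandled([signal.SIGTERM])
#   def _Serve(signal_handlers=None):
#     while not signal_handlers[signal.SIGTERM].called:
#       pass  # do one unit of work, then check again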
3046
3047
3048 class SignalWakeupFd(object):
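"""Wrapper around a pipe whose write end is registered as the signal wakeup
file descriptor (where supported), so that pending signals wake up poll or
select calls watching the read end.

"""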
3049 try:
3050 # This is only supported in Python 2.5 and above (some distributions
3051 # backported it to Python 2.4)
3052 _set_wakeup_fd_fn = signal.set_wakeup_fd
3053 except AttributeError:
3054 # Not supported
3055 def _SetWakeupFd(self, _): # pylint: disable-msg=R0201
3056 return -1
3057 else:
3058 def _SetWakeupFd(self, fd):
3059 return self._set_wakeup_fd_fn(fd)
3060
3061 def __init__(self):
3062 """Initializes this class.
3063
3064 """
3065 (read_fd, write_fd) = os.pipe()
3066
3067 # Once these succeeded, the file descriptors will be closed automatically.
3068 # Buffer size 0 is important, otherwise .read() with a specified length
3069 # might buffer data and the file descriptors won't be marked readable.
3070 self._read_fh = os.fdopen(read_fd, "r", 0)
3071 self._write_fh = os.fdopen(write_fd, "w", 0)
3072
3073 self._previous = self._SetWakeupFd(self._write_fh.fileno())
3074
3075 # Utility functions
3076 self.fileno = self._read_fh.fileno
3077 self.read = self._read_fh.read
3078
3079 def Reset(self):
3080 """Restores the previous wakeup file descriptor.
3081
3082 """
3083 if hasattr(self, "_previous") and self._previous is not None:
3084 self._SetWakeupFd(self._previous)
3085 self._previous = None
3086
3087 def Notify(self):
3088 """Notifies the wakeup file descriptor.
3089
3090 """
3091 self._write_fh.write("\0")
3092
3093 def __del__(self):
3094 """Called before object deletion.
3095
3096 """
3097 self.Reset()
3098
3099
3100 class SignalHandler(object):
3101 """Generic signal handler class.
3102
3103 It automatically restores the original handler when the object is destroyed or
3104 when L{Reset} is called. You can either pass your own handler
3105 function in or query the L{called} attribute to detect whether the
3106 signal was sent.
3107
3108 @type signum: list
3109 @ivar signum: the signals we handle
3110 @type called: boolean
3111 @ivar called: tracks whether any of the signals have been raised
3112
3113 """
3114 def __init__(self, signum, handler_fn=None, wakeup=None):
3115 """Constructs a new SignalHandler instance.
3116
3117 @type signum: int or list of ints
3118 @param signum: Single signal number or set of signal numbers
3119 @type handler_fn: callable
3120 @param handler_fn: Signal handling function
3121
3122 """
3123 assert handler_fn is None or callable(handler_fn)
3124
3125 self.signum = set(signum)
3126 self.called = False
3127
3128 self._handler_fn = handler_fn
3129 self._wakeup = wakeup
3130
3131 self._previous = {}
3132 try:
3133 for signum in self.signum:
3134 # Setup handler
3135 prev_handler = signal.signal(signum, self._HandleSignal)
3136 try:
3137 self._previous[signum] = prev_handler
3138 except:
3139 # Restore previous handler
3140 signal.signal(signum, prev_handler)
3141 raise
3142 except:
3143 # Reset all handlers
3144 self.Reset()
3145 # Here we have a race condition: a handler may have already been called,
3146 # but there's not much we can do about it at this point.
3147 raise
3148
3149 def __del__(self):
3150 self.Reset()
3151
3152 def Reset(self):
3153 """Restore previous handler.
3154
3155 This will reset all the signals to their previous handlers.
3156
3157 """
3158 for signum, prev_handler in self._previous.items():
3159 signal.signal(signum, prev_handler)
3160 # If successful, remove from dict
3161 del self._previous[signum]
3162
3163 def Clear(self):
3164 """Unsets the L{called} flag.
3165
3166 This function can be used in case a signal may arrive several times.
3167
3168 """
3169 self.called = False
3170
3171 def _HandleSignal(self, signum, frame):
3172 """Actual signal handling function.
3173
3174 """
3175 # This is not nice and not absolutely atomic, but it appears to be the only
3176 # solution in Python -- there are no atomic types.
3177 self.called = True
3178
3179 if self._wakeup:
3180 # Notify whoever is interested in signals
3181 self._wakeup.Notify()
3182
3183 if self._handler_fn:
3184 self._handler_fn(signum, frame)
3185
3186
3187 class FieldSet(object):
3188 """A simple field set.
3189
3190 Among the features are:
3191 - checking if a string is among a list of static string or regex objects
3192 - checking if a whole list of string matches
3193 - returning the matching groups from a regex match
3194
3195 Internally, all fields are held as regular expression objects.
3196
3197 """
3198 def __init__(self, *items):
3199 self.items = [re.compile("^%s$" % value) for value in items]
3200
3201 def Extend(self, other_set):
3202 """Extend the field set with the items from another one"""
3203 self.items.extend(other_set.items)
3204
3205 def Matches(self, field):
3206 """Checks if a field matches the current set
3207
3208 @type field: str
3209 @param field: the string to match
3210 @return: either None or a regular expression match object
3211
3212 """
3213 for m in itertools.ifilter(None, (val.match(field) for val in self.items)):
3214 return m
3215 return None
3216
3217 def NonMatching(self, items):
3218 """Returns the list of fields not matching the current set
3219
3220 @type items: list
3221 @param items: the list of fields to check
3222 @rtype: list
3223 @return: list of non-matching fields
3224
3225 """
3226 return [val for val in items if not self.Matches(val)]
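
# Illustrative sketch (not part of the original code); the field names are
# made up. Patterns are anchored with ^...$ internally, so regex fields only
# match complete names.
#
#   fields = FieldSet("name", "sda_size", r"disk\.size/\d+")
#   fields.Matches("disk.size/2")          # -> a match object
#   fields.NonMatching(["name", "other"])  # -> ["other"]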
3227
3228
3229 class RunningTimeout(object):
3230 """Class to calculate remaining timeout when doing several operations.
3231
3232 """
3233 __slots__ = [
3234 "_allow_negative",
3235 "_start_time",
3236 "_time_fn",
3237 "_timeout",
3238 ]
3239
3240 def __init__(self, timeout, allow_negative, _time_fn=time.time):
3241 """Initializes this class.
3242
3243 @type timeout: float
3244 @param timeout: Timeout duration
3245 @type allow_negative: bool
3246 @param allow_negative: Whether to return values below zero
3247 @param _time_fn: Time function for unittests
3248
3249 """
3250 object.__init__(self)
3251
3252 if timeout is not None and timeout < 0.0:
3253 raise ValueError("Timeout must not be negative")
3254
3255 self._timeout = timeout
3256 self._allow_negative = allow_negative
3257 self._time_fn = _time_fn
3258
3259 self._start_time = None
3260
3261 def Remaining(self):
3262 """Returns the remaining timeout.
3263
3264 """
3265 if self._timeout is None:
3266 return None
3267
3268 # Get start time on first calculation
3269 if self._start_time is None:
3270 self._start_time = self._time_fn()
3271
3272 # Calculate remaining time
3273 remaining_timeout = self._start_time + self._timeout - self._time_fn()
3274
3275 if not self._allow_negative:
3276 # Ensure timeout is always >= 0
3277 return max(0.0, remaining_timeout)
3278
3279 return remaining_timeout
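
# Illustrative sketch (not part of the original code): one overall deadline
# shared by several blocking calls, each getting whatever time is left. The
# called functions are hypothetical.
#
#   deadline = RunningTimeout(30.0, False)
#   first = SomeBlockingCall(timeout=deadline.Remaining())
#   second = OtherBlockingCall(timeout=deadline.Remaining())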