Merge branch 'stable-2.16' into stable-2.17
[ganeti-github.git] / lib / cli.py
1 #
2 #
3
4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc.
5 # All rights reserved.
6 #
7 # Redistribution and use in source and binary forms, with or without
8 # modification, are permitted provided that the following conditions are
9 # met:
10 #
11 # 1. Redistributions of source code must retain the above copyright notice,
12 # this list of conditions and the following disclaimer.
13 #
14 # 2. Redistributions in binary form must reproduce the above copyright
15 # notice, this list of conditions and the following disclaimer in the
16 # documentation and/or other materials provided with the distribution.
17 #
18 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
19 # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
20 # TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
21 # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
22 # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
23 # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
24 # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
25 # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
26 # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
27 # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28 # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
30
31 """Module dealing with command line parsing"""
32
33
34 import sys
35 import textwrap
36 import os.path
37 import time
38 import logging
39 import errno
40 import itertools
41 import shlex
42
43 from cStringIO import StringIO
44 from optparse import (OptionParser, TitledHelpFormatter)
45
46 from ganeti import utils
47 from ganeti import errors
48 from ganeti import constants
49 from ganeti import opcodes
50 import ganeti.rpc.errors as rpcerr
51 from ganeti import ssh
52 from ganeti import compat
53 from ganeti import netutils
54 from ganeti import qlang
55 from ganeti import objects
56 from ganeti import pathutils
57 from ganeti import serializer
58 import ganeti.cli_opts
59 # Import constants
60 from ganeti.cli_opts import * # pylint: disable=W0401,W0614
61
62 from ganeti.runtime import (GetClient)
63
64
65 __all__ = [
66 # Generic functions for CLI programs
67 "ConfirmOperation",
68 "CreateIPolicyFromOpts",
69 "GenericMain",
70 "GenericInstanceCreate",
71 "GenericList",
72 "GenericListFields",
73 "GetClient",
74 "GetOnlineNodes",
75 "GetNodesSshPorts",
76 "GetNodeUUIDs",
77 "JobExecutor",
78 "ParseTimespec",
79 "RunWhileClusterStopped",
80 "RunWhileDaemonsStopped",
81 "SubmitOpCode",
82 "SubmitOpCodeToDrainedQueue",
83 "SubmitOrSend",
84 # Formatting functions
85 "ToStderr", "ToStdout",
86 "ToStdoutAndLoginfo",
87 "FormatError",
88 "FormatQueryResult",
89 "FormatParamsDictInfo",
90 "FormatPolicyInfo",
91 "PrintIPolicyCommand",
92 "PrintGenericInfo",
93 "GenerateTable",
94 "AskUser",
95 "FormatTimestamp",
96 "FormatLogMessage",
97 # Tags functions
98 "ListTags",
99 "AddTags",
100 "RemoveTags",
101 # command line options support infrastructure
102 "ARGS_MANY_INSTANCES",
103 "ARGS_MANY_NODES",
104 "ARGS_MANY_GROUPS",
105 "ARGS_MANY_NETWORKS",
106 "ARGS_MANY_FILTERS",
107 "ARGS_NONE",
108 "ARGS_ONE_INSTANCE",
109 "ARGS_ONE_NODE",
110 "ARGS_ONE_GROUP",
111 "ARGS_ONE_OS",
112 "ARGS_ONE_NETWORK",
113 "ARGS_ONE_FILTER",
114 "ArgChoice",
115 "ArgCommand",
116 "ArgFile",
117 "ArgGroup",
118 "ArgHost",
119 "ArgInstance",
120 "ArgJobId",
121 "ArgNetwork",
122 "ArgNode",
123 "ArgOs",
124 "ArgExtStorage",
125 "ArgFilter",
126 "ArgSuggest",
127 "ArgUnknown",
128 "FixHvParams",
129 "SplitNodeOption",
130 "CalculateOSNames",
131 "ParseFields",
132 ] + ganeti.cli_opts.__all__ # Command line options
133
# Query result status for clients (in increasing order of severity)
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)

#: Maximum batch size for ChooseJob
_CHOOSE_BATCH = 25


# constants used to create InstancePolicy dictionary
# NOTE(review): presumably consumed by CreateIPolicyFromOpts (exported above);
# the group variant has no "std" values, only min/max bounds
TISPECS_GROUP_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  }

TISPECS_CLUSTER_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  constants.ISPECS_STD: constants.VTYPE_INT,
  }

#: User-friendly names for query2 field types
_QFT_NAMES = {
  constants.QFT_UNKNOWN: "Unknown",
  constants.QFT_TEXT: "Text",
  constants.QFT_BOOL: "Boolean",
  constants.QFT_NUMBER: "Number",
  constants.QFT_NUMBER_FLOAT: "Floating-point number",
  constants.QFT_UNIT: "Storage size",
  constants.QFT_TIMESTAMP: "Timestamp",
  constants.QFT_OTHER: "Custom",
  }
166
167
class _Argument(object):
  """Base class for positional argument definitions.

  Carries the minimum and maximum number of values the argument accepts.

  """
  def __init__(self, min=0, max=None): # pylint: disable=W0622
    self.min = min
    self.max = max

  def __repr__(self):
    cls_name = self.__class__.__name__
    return "<%s min=%s max=%s>" % (cls_name, self.min, self.max)
176
177
class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    cls_name = self.__class__.__name__
    return ("<%s min=%s max=%s choices=%r>" %
            (cls_name, self.min, self.max, self.choices))
192
193
class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """


class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """


class ArgNetwork(_Argument):
  """Network argument.

  """


class ArgGroup(_Argument):
  """Node group argument.

  """


class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """


class ArgExtStorage(_Argument):
  """ExtStorage argument.

  """


class ArgFilter(_Argument):
  """Filter UUID argument.

  """


# Ready-made argument specifications for command definitions:
# "MANY" variants accept any number of values (including zero),
# "ONE" variants require exactly one value (min=1, max=1).
ARGS_NONE = []
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NETWORKS = [ArgNetwork()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_MANY_FILTERS = [ArgFilter()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NETWORK = [ArgNetwork(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
ARGS_ONE_FILTER = [ArgFilter(min=1, max=1)]
287
288
def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")

  kind = opts.tag_type

  if kind == constants.TAG_CLUSTER:
    # The cluster is a singleton, no name needed
    return kind, ""

  if kind in (constants.TAG_NODEGROUP,
              constants.TAG_NODE,
              constants.TAG_NETWORK,
              constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command",
                                 errors.ECODE_INVAL)
    # Consume the object name from the argument list
    return kind, args.pop(0)

  raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
312
313
def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  @param opts: options object, read for its C{tags_source} attribute
  @param args: list of tags, extended in place with one stripped tag
    per line of the source file

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    # Only close streams we opened ourselves; previously this also closed
    # sys.stdin when reading tags from "-", making stdin unusable afterwards
    if new_fh is not sys.stdin:
      new_fh.close()
  args.extend(new_data)
341
342
def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient()
  for tag in sorted(cl.QueryTags(kind, name)):
    ToStdout(tag)
359
360
def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  # Optionally pull additional tags from a file/stdin
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added", errors.ECODE_INVAL)
  SubmitOrSend(opcodes.OpTagsSet(kind=kind, name=name, tags=args), opts)
376
377
def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  # Optionally pull additional tags from a file/stdin
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed", errors.ECODE_INVAL)
  SubmitOrSend(opcodes.OpTagsDel(kind=kind, name=name, tags=args), opts)
393
394
class _ShowUsage(Exception):
  """Exception raised by L{_ParseArgs} when usage text should be printed.

  """
  def __init__(self, exit_error):
    """Initializes instances of this class.

    @type exit_error: bool
    @param exit_error: Whether to report failure on exit

    """
    super(_ShowUsage, self).__init__()
    self.exit_error = exit_error
408
409
class _ShowVersion(Exception):
  """Exception class for L{_ParseArgs}.

  Raised when the version should be printed instead of running a command
  (i.e. "--version" was given).

  """
414
415
def _ParseArgs(binary, argv, commands, aliases, env_override):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param binary: Script name
  @param argv: Command line arguments
  @param commands: Dictionary containing command definitions
  @param aliases: dictionary with command aliases {"alias": "target", ...}
  @param env_override: list of env variables allowed for default args
  @raise _ShowUsage: If usage description should be shown
  @raise _ShowVersion: If version should be shown

  """
  assert not (env_override - set(commands))
  assert not (set(aliases.keys()) & set(commands.keys()))

  if len(argv) <= 1:
    # No option or command given
    raise _ShowUsage(exit_error=True)

  cmd = argv[1]
  if cmd == "--version":
    raise _ShowVersion()
  if cmd == "--help":
    raise _ShowUsage(exit_error=False)
  if cmd not in commands and cmd not in aliases:
    raise _ShowUsage(exit_error=True)

  # get command, unalias it, and look it up in commands
  if cmd in aliases:
    if aliases[cmd] not in commands:
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                   " command '%s'" % (cmd, aliases[cmd]))
    cmd = aliases[cmd]

  if cmd in env_override:
    # Allow default arguments via e.g. GNT_INSTANCE_LIST-style variables
    args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
    env_args = os.environ.get(args_env_name)
    if env_args:
      argv = utils.InsertAtPos(argv, 2, shlex.split(env_args))

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args(args=argv[2:])

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args
473
474
def _FormatUsage(binary, commands):
  """Generates a nice description of all commands.

  @param binary: Script name
  @param commands: Dictionary containing command definitions

  """
  # compute the max line length for cmd + usage
  mlen = min(60, max(len(name) for name in commands))

  yield "Usage: %s {command} [options...] [argument...]" % binary
  yield "%s <command> --help to see details, or man %s" % (binary, binary)
  yield ""
  yield "Commands:"

  # and format a nice command list
  for cmd, definition in sorted(commands.items()):
    help_text = definition[4]
    wrapped = textwrap.wrap(help_text, 79 - 3 - mlen)
    yield " %-*s - %s" % (mlen, cmd, wrapped[0])
    for extra_line in wrapped[1:]:
      yield " %-*s %s" % (mlen, "", extra_line)

  yield ""
498
499
def _CheckArguments(cmd, args_def, args):
  """Verifies the arguments using the argument definition.

  Algorithm:

  1. Abort with error if values specified by user but none expected.

  1. For each argument in definition

    1. Keep running count of minimum number of values (min_count)
    1. Keep running count of maximum number of values (max_count)
    1. If it has an unlimited number of values

      1. Abort with error if it's not the last argument in the definition

  1. If last argument has limited number of values

    1. Abort with error if number of values doesn't match or is too large

  1. Abort with error if user didn't pass enough values (min_count)

  @param cmd: command name (used in error messages only)
  @param args_def: list of L{_Argument} instances describing the expectation
  @param args: actual argument values given by the user
  @rtype: bool
  @return: True if the arguments satisfy the definition, False otherwise
    (errors are reported via ToStderr, not raised)

  """
  if args and not args_def:
    ToStderr("Error: Command %s expects no arguments", cmd)
    return False

  # Accumulated totals over all argument definitions; None means "no
  # constraint accumulated yet"
  min_count = None
  max_count = None
  check_max = None

  last_idx = len(args_def) - 1

  for idx, arg in enumerate(args_def):
    if min_count is None:
      min_count = arg.min
    elif arg.min is not None:
      min_count += arg.min

    if max_count is None:
      max_count = arg.max
    elif arg.max is not None:
      max_count += arg.max

    if idx == last_idx:
      # Only enforce an upper bound if the last argument is bounded
      check_max = (arg.max is not None)

    elif arg.max is None:
      raise errors.ProgrammerError("Only the last argument can have max=None")

  if check_max:
    # Command with exact number of arguments
    if (min_count is not None and max_count is not None and
        min_count == max_count and len(args) != min_count):
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
      return False

    # Command with limited number of arguments
    if max_count is not None and len(args) > max_count:
      ToStderr("Error: Command %s expects only %d argument(s)",
               cmd, max_count)
      return False

  # Command with some required arguments
  if min_count is not None and len(args) < min_count:
    ToStderr("Error: Command %s expects at least %d argument(s)",
             cmd, min_count)
    return False

  return True
569
570
def SplitNodeOption(value):
  """Splits the value of a --node option.

  Returns a pair of (node, second-part); the second part is None when the
  value contains no colon.

  """
  if not value or ":" not in value:
    return (value, None)
  return value.split(":", 1)
579
580
def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  if not os_variants:
    return [os_name]
  return ["%s+%s" % (os_name, variant) for variant in os_variants]
596
597
def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields

  """
  if selected is None:
    return default

  # A leading "+" means "append to the defaults" instead of replacing them
  extend_default = selected.startswith("+")
  fields = (selected[1:] if extend_default else selected).split(",")
  if extend_default:
    return default + fields
  return fields
614
615
def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask

  @param choices: list with elements tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do not perform the
      operation')]; note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty), we return the last
      entry from the list

  """
  if choices is None:
    choices = [("y", True, "Perform the operation"),
               ("n", False, "Do not perform the operation")]
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  # Last choice doubles as the default answer (returned when no tty)
  answer = choices[-1][1]
  new_text = []
  for line in text.splitlines():
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)
  try:
    # NOTE: py2 'file' builtin; "a+" lets us both prompt and read on the tty
    f = file("/dev/tty", "a+")
  except IOError:
    # No controlling terminal, fall back to the default answer
    return answer
  try:
    chars = [entry[0] for entry in choices]
    # Mark the default (last) choice with brackets in the prompt
    chars[-1] = "[%s]" % chars[-1]
    chars.append("?")
    maps = dict([(entry[0], entry[1]) for entry in choices])
    while True:
      f.write(text)
      f.write("\n")
      f.write("/".join(chars))
      f.write(": ")
      # read at most 2 chars (answer char + newline); longer input is
      # treated as invalid and re-prompts
      line = f.readline(2).strip().lower()
      if line in maps:
        answer = maps[line]
        break
      elif line == "?":
        # Print the help text for each choice and ask again
        for entry in choices:
          f.write(" %s - %s\n" % (entry[0], entry[2]))
        f.write("\n")
        continue
  finally:
    f.close()
  return answer
671
672
def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.

  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created
  @return: the job id of the submitted job

  """
  if cl is None:
    cl = GetClient()

  return cl.SubmitJob(ops)
689
690
def GenericPollJob(job_id, cbs, report_cbs, cancel_fn=None,
                   update_freq=constants.DEFAULT_WFJC_TIMEOUT):
  """Generic job-polling function.

  @type job_id: number
  @param job_id: Job ID
  @type cbs: Instance of L{JobPollCbBase}
  @param cbs: Data callbacks
  @type report_cbs: Instance of L{JobPollReportCbBase}
  @param report_cbs: Reporting callbacks
  @type cancel_fn: Function returning a boolean
  @param cancel_fn: Function to check if we should cancel the running job
  @type update_freq: int/long
  @param update_freq: number of seconds between each WFJC reports
  @return: the opresult of the job
  @raise errors.JobLost: If job can't be found
  @raise errors.JobCanceled: If job is canceled
  @raise errors.OpExecError: If job didn't succeed

  """
  prev_job_info = None
  prev_logmsg_serial = None

  status = None
  should_cancel = False

  if update_freq <= 0:
    raise errors.ParameterError("Update frequency must be a positive number")

  while True:
    if cancel_fn:
      # Poll in short CLI_WFJC_FREQUENCY slices so cancel_fn gets checked
      # regularly while still waiting up to update_freq in total
      timer = 0
      while timer < update_freq:
        result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
                                          prev_logmsg_serial,
                                          timeout=constants.CLI_WFJC_FREQUENCY)
        should_cancel = cancel_fn()
        if should_cancel or not result or result != constants.JOB_NOTCHANGED:
          break
        timer += constants.CLI_WFJC_FREQUENCY
    else:
      result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
                                        prev_logmsg_serial, timeout=update_freq)
    if not result:
      # job not found, go away!
      raise errors.JobLost("Job with id %s lost" % job_id)

    if should_cancel:
      logging.info("Job %s canceled because the client timed out.", job_id)
      cbs.CancelJob(job_id)
      raise errors.JobCanceled("Job was canceled")

    if result == constants.JOB_NOTCHANGED:
      report_cbs.ReportNotChanged(job_id, status)
      # Wait again
      continue

    # Split result, a tuple of (field values, log entries)
    (job_info, log_entries) = result
    (status, ) = job_info

    if log_entries:
      for log_entry in log_entries:
        (serial, timestamp, log_type, message) = log_entry
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
                                    log_type, message)
        # py2: max(None, serial) == serial, so the first serial always wins
        prev_logmsg_serial = max(prev_logmsg_serial, serial)

    # TODO: Handle canceled and archived jobs
    # NOTE: intentionally an "elif" — the loop only terminates on an
    # iteration that delivered no new log messages AND a final status,
    # ensuring pending log entries are drained first
    elif status in (constants.JOB_STATUS_SUCCESS,
                    constants.JOB_STATUS_ERROR,
                    constants.JOB_STATUS_CANCELING,
                    constants.JOB_STATUS_CANCELED):
      break

    prev_job_info = job_info

  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
  if not jobs:
    raise errors.JobLost("Job with id %s lost" % job_id)

  status, opstatus, result = jobs[0]

  if status == constants.JOB_STATUS_SUCCESS:
    return result

  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
    raise errors.JobCanceled("Job was canceled")

  has_ok = False
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
    if status == constants.OP_STATUS_SUCCESS:
      has_ok = True
    elif status == constants.OP_STATUS_ERROR:
      errors.MaybeRaise(msg)

      if has_ok:
        raise errors.OpExecError("partial failure (opcode %d): %s" %
                                 (idx, msg))

      raise errors.OpExecError(str(msg))

  # default failure mode
  raise errors.OpExecError(result)
795
796
class JobPollCbBase(object):
  """Base class for L{GenericPollJob} callbacks.

  Subclasses implement the actual transport used to query and wait for
  jobs (see L{_LuxiJobPollCb} for the luxi-based implementation).

  """
  def __init__(self):
    """Initializes this class.

    """

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial,
                           timeout=constants.DEFAULT_WFJC_TIMEOUT):
    """Waits for changes on a job.

    @raise NotImplementedError: must be overridden by subclasses

    """
    raise NotImplementedError()

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields

    @raise NotImplementedError: must be overridden by subclasses

    """
    raise NotImplementedError()

  def CancelJob(self, job_id):
    """Cancels a currently running job.

    @type job_id: number
    @param job_id: The ID of the Job we want to cancel

    @raise NotImplementedError: must be overridden by subclasses

    """
    raise NotImplementedError()
833
834
class JobPollReportCbBase(object):
  """Base class for L{GenericPollJob} reporting callbacks.

  Subclasses decide how job progress is surfaced to the user (see
  L{StdioJobPollReportCb} and L{FeedbackFnJobPollReportCb}).

  """
  def __init__(self):
    """Initializes this class.

    """

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    @raise NotImplementedError: must be overridden by subclasses

    """
    raise NotImplementedError()

  def ReportNotChanged(self, job_id, status):
    """Called for if a job hasn't changed in a while.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    @raise NotImplementedError: must be overridden by subclasses

    """
    raise NotImplementedError()
860
861
class _LuxiJobPollCb(JobPollCbBase):
  """L{JobPollCbBase} implementation delegating all calls to a luxi client.

  """
  def __init__(self, cl):
    """Initializes this class.

    @param cl: luxi client used for all job operations

    """
    JobPollCbBase.__init__(self)
    self.cl = cl

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial,
                           timeout=constants.DEFAULT_WFJC_TIMEOUT):
    """Waits for changes on a job.

    """
    return self.cl.WaitForJobChangeOnce(job_id, fields,
                                        prev_job_info, prev_log_serial,
                                        timeout=timeout)

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    """
    return self.cl.QueryJobs(job_ids, fields)

  def CancelJob(self, job_id):
    """Cancels a currently running job.

    """
    return self.cl.CancelJob(job_id)
891
892
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
  """Job-poll reporter forwarding all log messages to a feedback function.

  """
  def __init__(self, feedback_fn):
    """Initializes this class.

    @param feedback_fn: callable invoked with a (timestamp, log_type,
      log_msg) tuple for every log message

    """
    JobPollReportCbBase.__init__(self)

    self.feedback_fn = feedback_fn

    assert callable(feedback_fn)

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    entry = (timestamp, log_type, log_msg)
    self.feedback_fn(entry)

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    # Deliberately a no-op for this reporter
915
916
class StdioJobPollReportCb(JobPollReportCbBase):
  """Job-poll reporter printing progress to standard output/error.

  """
  def __init__(self):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    # Flags ensuring each state notification is printed at most once
    self.notified_queued = False
    self.notified_waitlock = False

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    when = time.ctime(utils.MergeTime(timestamp))
    what = FormatLogMessage(log_type, log_msg)
    ToStdout("%s %s", when, what)

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    if status is None:
      return

    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
      ToStderr("Job %s is waiting in queue", job_id)
      self.notified_queued = True
    elif (status == constants.JOB_STATUS_WAITING and
          not self.notified_waitlock):
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
      self.notified_waitlock = True
948
949
def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  Non-message entries are stringified before encoding.

  """
  if log_type == constants.ELOG_MESSAGE:
    return utils.SafeEncode(log_msg)
  return utils.SafeEncode(str(log_msg))
958
959
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None, cancel_fn=None,
            update_freq=constants.DEFAULT_WFJC_TIMEOUT):
  """Function to poll for the result of a job.

  @type job_id: job identified
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created
  @type cancel_fn: Function returning a boolean
  @param cancel_fn: Function to check if we should cancel the running job
  @type update_freq: int/long
  @param update_freq: number of seconds between each WFJC report

  """
  if cl is None:
    cl = GetClient()

  if reporter is None:
    # Pick a reporter: forward to feedback_fn if given, otherwise stdio
    reporter = (FeedbackFnJobPollReportCb(feedback_fn) if feedback_fn
                else StdioJobPollReportCb())
  elif feedback_fn:
    raise errors.ProgrammerError("Can't specify reporter and feedback function")

  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter,
                        cancel_fn=cancel_fn, update_freq=update_freq)
988
989
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
  """Legacy function to submit an opcode.

  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.

  """
  if cl is None:
    cl = GetClient()

  SetGenericOpcodeOpts([op], opts)

  job_id = SendJob([op], cl=cl)
  if getattr(opts, "print_jobid", False):
    ToStdout("%d" % job_id)

  return PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
                 reporter=reporter)[0]
1011
1012
def SubmitOpCodeToDrainedQueue(op):
  """Forcefully insert a job in the queue, even if it is drained.

  """
  cl = GetClient()
  job_id = cl.SubmitJobToDrainedQueue([op])
  return PollJob(job_id, cl=cl)[0]
1021
1022
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending them via SendJob
  (otherwise SubmitOpCode does it).

  """
  if not (opts and opts.submit_only):
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)

  job = [op]
  SetGenericOpcodeOpts(job, opts)
  job_id = SendJob(job, cl=cl)
  if opts.print_jobid:
    ToStdout("%d" % job_id)
  # Signal "submitted, not waited for" to the caller via an exception
  raise errors.JobSubmittedException(job_id)
1044
1045
def _InitReasonTrail(op, opts):
  """Builds the first part of the reason trail

  Builds the initial part of the reason trail, adding the user provided reason
  (if it exists) and the name of the command starting the operation.

  @param op: the opcode the reason trail will be added to
  @param opts: the command line options selected by the user

  """
  assert len(sys.argv) >= 2

  trail = []
  if opts.reason:
    trail.append((constants.OPCODE_REASON_SRC_USER,
                  opts.reason,
                  utils.EpochNano()))

  binary = os.path.basename(sys.argv[0])
  source = "%s:%s" % (constants.OPCODE_REASON_SRC_CLIENT, binary)
  trail.append((source, sys.argv[1], utils.EpochNano()))
  op.reason = trail
1069
1070
def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  if not options:
    return

  for opcode in opcode_list:
    opcode.debug_level = options.debug
    if hasattr(options, "dry_run"):
      opcode.dry_run = options.dry_run
    priority = getattr(options, "priority", None)
    if priority is not None:
      opcode.priority = priority
    _InitReasonTrail(opcode, options)
1091
1092
def FormatError(err):
  """Return a formatted error message for a given error.

  This function takes an exception instance and returns a tuple
  consisting of two values: first, the recommended exit code, and
  second, a string describing the error message (not
  newline-terminated).

  @type err: Exception
  @param err: the exception to describe
  @rtype: tuple; (int, string)
  @return: recommended exit code and the error description

  """
  retcode = 1
  obuf = StringIO()
  msg = str(err)
  # NOTE: the branch order below matters; more specific exception classes
  # must be tested before their base classes (in particular before
  # errors.GenericError, which matches most Ganeti exceptions)
  if isinstance(err, errors.ConfigurationError):
    txt = "Corrupt configuration file: %s" % msg
    logging.error(txt)
    obuf.write(txt + "\n")
    obuf.write("Aborting.")
    # configuration corruption gets a distinct exit code
    retcode = 2
  elif isinstance(err, errors.HooksAbort):
    obuf.write("Failure: hooks execution failed:\n")
    # err.args[0] is a list of (node, script, output) tuples
    for node, script, out in err.args[0]:
      if out:
        obuf.write("  node: %s, script: %s, output: %s\n" %
                   (node, script, out))
      else:
        obuf.write("  node: %s, script: %s (no output)\n" %
                   (node, script))
  elif isinstance(err, errors.HooksFailure):
    obuf.write("Failure: hooks general failure: %s" % msg)
  elif isinstance(err, errors.ResolverError):
    # distinguish failing to resolve ourselves from failing to resolve
    # another host
    this_host = netutils.Hostname.GetSysName()
    if err.args[0] == this_host:
      msg = "Failure: can't resolve my own hostname ('%s')"
    else:
      msg = "Failure: can't resolve hostname '%s'"
    obuf.write(msg % err.args[0])
  elif isinstance(err, errors.OpPrereqError):
    # with two arguments, the second one is the error code
    if len(err.args) == 2:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\nerror type: %s, error details:\n%s" %
                 (err.args[1], err.args[0]))
    else:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\n%s" % msg)
  elif isinstance(err, errors.OpExecError):
    obuf.write("Failure: command execution error:\n%s" % msg)
  elif isinstance(err, errors.TagError):
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
  elif isinstance(err, errors.JobQueueDrainError):
    obuf.write("Failure: the job queue is marked for drain and doesn't"
               " accept new requests\n")
  elif isinstance(err, errors.JobQueueFull):
    obuf.write("Failure: the job queue is full and doesn't accept new"
               " job submissions until old jobs are archived\n")
  elif isinstance(err, errors.TypeEnforcementError):
    obuf.write("Parameter Error: %s" % msg)
  elif isinstance(err, errors.ParameterError):
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
  elif isinstance(err, rpcerr.NoMasterError):
    # map the socket path to a human-readable daemon name
    if err.args[0] == pathutils.MASTER_SOCKET:
      daemon = "the master daemon"
    elif err.args[0] == pathutils.QUERY_SOCKET:
      daemon = "the config daemon"
    else:
      daemon = "socket '%s'" % str(err.args[0])
    obuf.write("Cannot communicate with %s.\nIs the process running"
               " and listening for connections?" % daemon)
  elif isinstance(err, rpcerr.TimeoutError):
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
               " been submitted and will continue to run even if the call"
               " timed out. Useful commands in this situation are \"gnt-job"
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
    obuf.write(msg)
  elif isinstance(err, rpcerr.PermissionError):
    obuf.write("It seems you don't have permissions to connect to the"
               " master daemon.\nPlease retry as a different user.")
  elif isinstance(err, rpcerr.ProtocolError):
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
               "%s" % msg)
  elif isinstance(err, errors.JobLost):
    obuf.write("Error checking job status: %s" % msg)
  elif isinstance(err, errors.QueryFilterParseError):
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
    obuf.write("\n".join(err.GetDetails()))
  elif isinstance(err, errors.GenericError):
    obuf.write("Unhandled Ganeti error: %s" % msg)
  elif isinstance(err, errors.JobSubmittedException):
    # not really an error: the job was submitted successfully
    obuf.write("JobID: %s\n" % err.args[0])
    retcode = 0
  else:
    obuf.write("Unhandled exception: %s" % msg)
  return retcode, obuf.getvalue().rstrip("\n")
1185
1186
def GenericMain(commands, override=None, aliases=None,
                env_override=frozenset()):
  """Generic main function for all the gnt-* commands.

  @param commands: a dictionary with a special structure, see the design doc
                   for command line handling.
  @param override: if not None, we expect a dictionary with keys that will
                   override command line options; this can be used to pass
                   options from the scripts to generic functions
  @param aliases: dictionary with command aliases {'alias': 'target, ...}
  @param env_override: list of environment names which are allowed to submit
                       default args for commands
  @rtype: int
  @return: the exit code of the selected command

  """
  # save the program name and the entire command line for later logging
  if sys.argv:
    binary = os.path.basename(sys.argv[0])
    if not binary:
      binary = sys.argv[0]

    if len(sys.argv) >= 2:
      # include the sub-command (e.g. "gnt-node list") in the log name
      logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
    else:
      logname = binary

    cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
  else:
    # no command line available (e.g. embedded invocation)
    binary = "<unknown program>"
    cmdline = "<unknown>"

  if aliases is None:
    aliases = {}

  try:
    (func, options, args) = _ParseArgs(binary, sys.argv, commands, aliases,
                                       env_override)
  except _ShowVersion:
    # --version was given; print it and exit successfully
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    return constants.EXIT_SUCCESS
  except _ShowUsage, err:
    # --help or a usage error; print the usage text for all commands
    for line in _FormatUsage(binary, commands):
      ToStdout(line)

    if err.exit_error:
      return constants.EXIT_FAILURE
    else:
      return constants.EXIT_SUCCESS
  except errors.ParameterError, err:
    result, err_msg = FormatError(err)
    ToStderr(err_msg)
    return 1

  if func is None: # parse error
    return 1

  if override is not None:
    # let the caller force specific option values
    for key, val in override.iteritems():
      setattr(options, key, val)

  utils.SetupLogging(pathutils.LOG_COMMANDS, logname, debug=options.debug,
                     stderr_logging=True)

  logging.debug("Command line: %s", cmdline)

  try:
    result = func(options, args)
  except (errors.GenericError, rpcerr.ProtocolError,
          errors.JobSubmittedException), err:
    # known error classes: format them nicely and log the full traceback
    result, err_msg = FormatError(err)
    logging.exception("Error during command processing")
    ToStderr(err_msg)
  except KeyboardInterrupt:
    result = constants.EXIT_FAILURE
    ToStderr("Aborted. Note that if the operation created any jobs, they"
             " might have been submitted and"
             " will continue to run in the background.")
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
    else:
      raise

  return result
1272
1273
def ParseNicOption(optvalue):
  """Parses the value of the --net option(s).

  @type optvalue: list of tuples
  @param optvalue: list of (nic_index, settings_dict) pairs as produced
      by the option parser
  @rtype: list of dicts
  @return: NIC parameter dictionaries indexed by NIC number; indices not
      mentioned in C{optvalue} get an empty dict
  @raise errors.OpPrereqError: if an index is not a valid integer or a
      value is not a dictionary

  """
  try:
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
  except (TypeError, ValueError) as err:
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err),
                               errors.ECODE_INVAL)

  # Use independent dicts; "[{}] * nic_max" would alias a single shared
  # dict across all slots that are not overwritten below, so mutating
  # one unset NIC's parameters would silently change all of them
  nics = [{} for _ in range(nic_max)]
  for nidx, ndict in optvalue:
    nidx = int(nidx)

    if not isinstance(ndict, dict):
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
                                 " got %s" % (nidx, ndict), errors.ECODE_INVAL)

    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)

    nics[nidx] = ndict

  return nics
1297
1298
def FixHvParams(hvparams):
  """Canonicalizes the separator of the usb_devices hvparam.

  In Ganeti 2.8.4 the separator for the usb_devices hvparam was changed from
  comma to space because commas cannot be accepted on the command line
  (they already act as the separator between different hvparams). Still,
  RAPI should be able to accept commas for backwards compatibility.
  Therefore, we convert spaces into commas here, and we keep the old
  parsing logic everywhere else.

  @type hvparams: dict
  @param hvparams: hypervisor parameters, modified in place

  """
  if constants.HV_USB_DEVICES in hvparams:
    fixed = hvparams[constants.HV_USB_DEVICES].replace(" ", ",")
    hvparams[constants.HV_USB_DEVICES] = fixed
1312
1313
def GenericInstanceCreate(mode, opts, args):
  """Add an instance to the cluster via either creation or import.

  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the new instance name
  @rtype: int
  @return: the desired exit code
  @raise errors.OpPrereqError: on invalid option combinations
  @raise errors.ProgrammerError: if C{mode} is not a known creation mode

  """
  instance = args[0]
  forthcoming = opts.ensure_value("forthcoming", False)
  commit = opts.ensure_value("commit", False)

  if forthcoming and commit:
    # FIX: corrected spelling of "committing"/"mutually" in this message
    raise errors.OpPrereqError("Creating an instance only forthcoming and"
                               " committing it are mutually exclusive",
                               errors.ECODE_INVAL)

  (pnode, snode) = SplitNodeOption(opts.node)

  hypervisor = None
  hvparams = {}
  if opts.hypervisor:
    hypervisor, hvparams = opts.hypervisor

  if opts.nics:
    nics = ParseNicOption(opts.nics)
  elif opts.no_nics:
    # no nics
    nics = []
  elif mode == constants.INSTANCE_CREATE:
    # default of one nic, all auto
    nics = [{}]
  else:
    # mode == import
    nics = []

  if opts.disk_template == constants.DT_DISKLESS:
    if opts.disks or opts.sd_size is not None:
      raise errors.OpPrereqError("Diskless instance but disk"
                                 " information passed", errors.ECODE_INVAL)
    disks = []
  else:
    if (not opts.disks and not opts.sd_size
        and mode == constants.INSTANCE_CREATE):
      raise errors.OpPrereqError("No disk information specified",
                                 errors.ECODE_INVAL)
    if opts.disks and opts.sd_size is not None:
      raise errors.OpPrereqError("Please use either the '--disk' or"
                                 " '-s' option", errors.ECODE_INVAL)
    if opts.sd_size is not None:
      # translate the single-disk shortcut into the generic disk spec
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]

    if opts.disks:
      try:
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
      except ValueError as err:
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err),
                                   errors.ECODE_INVAL)
      # FIX: use independent dicts; "[{}] * disk_max" would alias one
      # shared dict across all slots not explicitly overwritten below
      disks = [{} for _ in range(disk_max)]
    else:
      disks = []
    for didx, ddict in opts.disks:
      didx = int(didx)
      if not isinstance(ddict, dict):
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
      elif constants.IDISK_SIZE in ddict:
        if constants.IDISK_ADOPT in ddict:
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
                                     " (disk %d)" % didx, errors.ECODE_INVAL)
        try:
          ddict[constants.IDISK_SIZE] = \
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
        except ValueError as err:
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
                                     (didx, err), errors.ECODE_INVAL)
      elif constants.IDISK_ADOPT in ddict:
        if constants.IDISK_SPINDLES in ddict:
          raise errors.OpPrereqError("spindles is not a valid option when"
                                     " adopting a disk", errors.ECODE_INVAL)
        if mode == constants.INSTANCE_IMPORT:
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
                                     " import", errors.ECODE_INVAL)
        # size will be determined from the adopted device
        ddict[constants.IDISK_SIZE] = 0
      else:
        raise errors.OpPrereqError("Missing size or adoption source for"
                                   " disk %d" % didx, errors.ECODE_INVAL)
      if constants.IDISK_SPINDLES in ddict:
        ddict[constants.IDISK_SPINDLES] = int(ddict[constants.IDISK_SPINDLES])

      disks[didx] = ddict

  if opts.tags is not None:
    tags = opts.tags.split(",")
  else:
    tags = []

  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
  FixHvParams(hvparams)

  osparams_private = opts.osparams_private or serializer.PrivateDict()
  osparams_secret = opts.osparams_secret or serializer.PrivateDict()

  helper_startup_timeout = opts.helper_startup_timeout
  helper_shutdown_timeout = opts.helper_shutdown_timeout

  if mode == constants.INSTANCE_CREATE:
    start = opts.start
    os_type = opts.os
    force_variant = opts.force_variant
    src_node = None
    src_path = None
    no_install = opts.no_install
    identify_defaults = False
    compress = constants.IEC_NONE
    if opts.instance_communication is None:
      instance_communication = False
    else:
      instance_communication = opts.instance_communication
  elif mode == constants.INSTANCE_IMPORT:
    if forthcoming:
      # FIX: pass an error code, consistently with the other
      # OpPrereqError raises in this function
      raise errors.OpPrereqError("forthcoming instances can only be created,"
                                 " not imported", errors.ECODE_INVAL)
    start = False
    os_type = None
    force_variant = False
    src_node = opts.src_node
    src_path = opts.src_dir
    no_install = None
    identify_defaults = opts.identify_defaults
    compress = opts.compress
    instance_communication = False
  else:
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)

  op = opcodes.OpInstanceCreate(
    forthcoming=forthcoming,
    commit=commit,
    instance_name=instance,
    disks=disks,
    disk_template=opts.disk_template,
    group_name=opts.nodegroup,
    nics=nics,
    conflicts_check=opts.conflicts_check,
    pnode=pnode, snode=snode,
    ip_check=opts.ip_check,
    name_check=opts.name_check,
    wait_for_sync=opts.wait_for_sync,
    file_storage_dir=opts.file_storage_dir,
    file_driver=opts.file_driver,
    iallocator=opts.iallocator,
    hypervisor=hypervisor,
    hvparams=hvparams,
    beparams=opts.beparams,
    osparams=opts.osparams,
    osparams_private=osparams_private,
    osparams_secret=osparams_secret,
    mode=mode,
    opportunistic_locking=opts.opportunistic_locking,
    start=start,
    os_type=os_type,
    force_variant=force_variant,
    src_node=src_node,
    src_path=src_path,
    compress=compress,
    tags=tags,
    no_install=no_install,
    identify_defaults=identify_defaults,
    ignore_ipolicy=opts.ignore_ipolicy,
    instance_communication=instance_communication,
    helper_startup_timeout=helper_startup_timeout,
    helper_shutdown_timeout=helper_shutdown_timeout)

  SubmitOrSend(op, opts)
  return 0
1493
1494
class _RunWhileDaemonsStoppedHelper(object):
  """Helper class for L{RunWhileDaemonsStopped} to simplify state management

  """
  def __init__(self, feedback_fn, cluster_name, master_node,
               online_nodes, ssh_ports, exclude_daemons, debug,
               verbose):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: Feedback function
    @type cluster_name: string
    @param cluster_name: Cluster name
    @type master_node: string
    @param master_node: Master node name
    @type online_nodes: list
    @param online_nodes: List of names of online nodes
    @type ssh_ports: list
    @param ssh_ports: List of SSH ports of online nodes, parallel to
        C{online_nodes}
    @type exclude_daemons: list of string
    @param exclude_daemons: list of daemons that will be restarted on master
                            after all others are shutdown
    @type debug: boolean
    @param debug: show debug output
    @type verbose: boolean
    @param verbose: show verbose output

    """
    self.feedback_fn = feedback_fn
    self.cluster_name = cluster_name
    self.master_node = master_node
    self.online_nodes = online_nodes
    # map node name -> SSH port (the two input lists are parallel)
    self.ssh_ports = dict(zip(online_nodes, ssh_ports))

    self.ssh = ssh.SshRunner(self.cluster_name)

    self.nonmaster_nodes = [name for name in online_nodes
                            if name != master_node]

    self.exclude_daemons = exclude_daemons
    self.debug = debug
    self.verbose = verbose

    assert self.master_node not in self.nonmaster_nodes

  def _RunCmd(self, node_name, cmd):
    """Runs a command on the local or a remote machine.

    @type node_name: string
    @param node_name: Machine name
    @type cmd: list
    @param cmd: Command
    @raise errors.OpExecError: if the command returns a non-zero exit code

    """
    if node_name is None or node_name == self.master_node:
      # No need to use SSH
      result = utils.RunCmd(cmd)
    else:
      result = self.ssh.Run(node_name, constants.SSH_LOGIN_USER,
                            utils.ShellQuoteArgs(cmd),
                            port=self.ssh_ports[node_name])

    if result.failed:
      errmsg = ["Failed to run command %s" % result.cmd]
      if node_name:
        errmsg.append("on node %s" % node_name)
      errmsg.append(": exitcode %s and error %s" %
                    (result.exit_code, result.output))
      raise errors.OpExecError(" ".join(errmsg))

  def Call(self, fn, *args):
    """Call function while all daemons are stopped.

    @type fn: callable
    @param fn: Function to be called; receives this helper instance as its
        first argument, followed by C{args}
    @return: the return value of C{fn}

    """
    # Pause watcher by acquiring an exclusive lock on watcher state file
    self.feedback_fn("Blocking watcher")
    watcher_block = utils.FileLock.Open(pathutils.WATCHER_LOCK_FILE)
    try:
      # TODO: Currently, this just blocks. There's no timeout.
      # TODO: Should it be a shared lock?
      watcher_block.Exclusive(blocking=True)

      # Stop master daemons, so that no new jobs can come in and all running
      # ones are finished
      self.feedback_fn("Stopping master daemons")
      self._RunCmd(None, [pathutils.DAEMON_UTIL, "stop-master"])
      try:
        # Stop daemons on all nodes
        online_nodes = [self.master_node] + [n for n in self.online_nodes
                                             if n != self.master_node]
        for node_name in online_nodes:
          self.feedback_fn("Stopping daemons on %s" % node_name)
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop-all"])
          # Starting any daemons listed as exception
          if node_name == self.master_node:
            for daemon in self.exclude_daemons:
              self.feedback_fn("Starting daemon '%s' on %s" % (daemon,
                                                               node_name))
              self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start", daemon])

        # All daemons are shut down now
        try:
          return fn(self, *args)
        except Exception, err:
          _, errmsg = FormatError(err)
          logging.exception("Caught exception")
          self.feedback_fn(errmsg)
          raise
      finally:
        # Start cluster again, master node last
        for node_name in self.nonmaster_nodes + [self.master_node]:
          # Stopping any daemons listed as exception.
          # This might look unnecessary, but it makes sure that daemon-util
          # starts all daemons in the right order.
          if node_name == self.master_node:
            # reversed so that daemons are stopped in the opposite order
            # they were started in above
            self.exclude_daemons.reverse()
            for daemon in self.exclude_daemons:
              self.feedback_fn("Stopping daemon '%s' on %s" % (daemon,
                                                               node_name))
              self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop", daemon])
          self.feedback_fn("Starting daemons on %s" % node_name)
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start-all"])

    finally:
      # Resume watcher
      watcher_block.Close()
1624
1625
def RunWhileDaemonsStopped(feedback_fn, exclude_daemons, fn, *args, **kwargs):
  """Calls a function while all cluster daemons are stopped.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @type exclude_daemons: list of string
  @param exclude_daemons: list of daemons that are stopped, but immediately
                          restarted on the master to be available when calling
                          'fn'. If None, all daemons will be stopped and none
                          will be started before calling 'fn'.
  @type fn: callable
  @param fn: Function to be called when daemons are stopped

  """
  feedback_fn("Gathering cluster information")

  # This ensures we're running on the master daemon
  client = GetClient()

  (cluster_name, master_node) = \
    client.QueryConfigValues(["cluster_name", "master_node"])

  online_nodes = GetOnlineNodes([], cl=client)
  ssh_ports = GetNodesSshPorts(online_nodes, client)

  # Don't keep a reference to the client. The master daemon will go away.
  del client

  assert master_node in online_nodes

  helper = _RunWhileDaemonsStoppedHelper(
      feedback_fn, cluster_name, master_node, online_nodes, ssh_ports,
      [] if exclude_daemons is None else exclude_daemons,
      kwargs.get("debug", False), kwargs.get("verbose", False))

  return helper.Call(fn, *args)
1664
1665
def RunWhileClusterStopped(feedback_fn, fn, *args):
  """Calls a function while all cluster daemons are stopped.

  Convenience wrapper around L{RunWhileDaemonsStopped} which stops all
  daemons (no daemons are excluded/restarted on the master node).

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @type fn: callable
  @param fn: Function to be called when daemons are stopped
  @return: the return value of 'fn'

  """
  # FIX: propagate 'fn's result; previously it was silently discarded,
  # unlike in RunWhileDaemonsStopped
  return RunWhileDaemonsStopped(feedback_fn, None, fn, *args)
1676
1677
def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
                  units=None):
  """Prints a table with headers and different fields.

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the table
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data field
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output;
      note that the rows are modified in place (values are converted to
      strings, possibly with unit formatting)
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string
  @rtype: list of strings
  @return: the formatted table, one line per list entry

  """
  if units is None:
    # default: megabytes in separator mode, human-readable otherwise
    if separator:
      units = "m"
    else:
      units = "h"

  if numfields is None:
    numfields = []
  if unitfields is None:
    unitfields = []

  # FieldSet supports matching field names against patterns
  numfields = utils.FieldSet(*numfields)
  unitfields = utils.FieldSet(*unitfields)

  format_fields = []
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # variable fields)
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      # right-aligned, width filled in via "*" at render time
      format_fields.append("%*s")
    else:
      format_fields.append("%-*s")

  if separator is None:
    # smart mode: track the maximum width seen in each column
    mlens = [0 for name in fields]
    format_str = " ".join(format_fields)
  else:
    # escape "%" so the separator survives the final "%" formatting below
    format_str = separator.replace("%", "%%").join(format_fields)

  # first pass: stringify all values (in place), applying unit formatting
  # where requested, and record column widths for smart mode
  for row in data:
    if row is None:
      continue
    for idx, val in enumerate(row):
      if unitfields.Matches(fields[idx]):
        try:
          val = int(val)
        except (TypeError, ValueError):
          pass
        else:
          val = row[idx] = utils.FormatUnit(val, units)
      val = row[idx] = str(val)
      if separator is None:
        mlens[idx] = max(mlens[idx], len(val))

  result = []
  if headers:
    args = []
    for idx, name in enumerate(fields):
      hdr = headers[name]
      if separator is None:
        # headers also contribute to the column width
        mlens[idx] = max(mlens[idx], len(hdr))
        args.append(mlens[idx])
      args.append(hdr)
    result.append(format_str % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)

    if fields and not numfields.Matches(fields[-1]):
      # no need to pad the last, left-aligned column
      mlens[-1] = 0

  for line in data:
    args = []
    if line is None:
      # None rows are rendered as a row of dashes
      line = ["-" for _ in fields]
    for idx in range(len(fields)):
      if separator is None:
        args.append(mlens[idx])
      args.append(line[idx])
    result.append(format_str % tuple(args))

  return result
1784
1785
1786 def _FormatBool(value):
1787 """Formats a boolean value as a string.
1788
1789 """
1790 if value:
1791 return "Y"
1792 return "N"
1793
1794
#: Default formatting for query results, indexed by field type; each
#: value is a tuple of (formatting callback, align right)
_DEFAULT_FORMAT_QUERY = {
  constants.QFT_TEXT: (str, False),
  constants.QFT_BOOL: (_FormatBool, False),
  constants.QFT_NUMBER: (str, True),
  constants.QFT_NUMBER_FLOAT: (str, True),
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
  constants.QFT_OTHER: (str, False),
  constants.QFT_UNKNOWN: (str, False),
  }
1805
1806
def _GetColumnFormatter(fdef, override, unit):
  """Returns formatting function for a field.

  @type fdef: L{objects.QueryFieldDefinition}
  @type override: dict
  @param override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
  @rtype: tuple; (callable, bool)
  @return: Returns the function to format a value (takes one parameter) and a
    boolean for aligning the value on the right-hand side

  """
  # Per-field overrides take precedence over everything else
  custom = override.get(fdef.name, None)
  if custom is not None:
    return custom

  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY

  if fdef.kind == constants.QFT_UNIT:
    # The unit is only known at runtime, hence the closure instead of a
    # static dictionary entry
    return (lambda value: utils.FormatUnit(value, unit), True)

  default = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
  if default is not None:
    return default

  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
1836
1837
class _QueryColumnFormatter(object):
  """Callable class for formatting fields of a query.

  """
  def __init__(self, fn, status_fn, verbose):
    """Initializes this class.

    @type fn: callable
    @param fn: Formatting function
    @type status_fn: callable
    @param status_fn: Function to report fields' status
    @type verbose: boolean
    @param verbose: whether to use verbose field descriptions or not

    """
    self._format_fn = fn
    self._report_status_fn = status_fn
    self._use_verbose = verbose

  def __call__(self, data):
    """Returns a field's string representation.

    """
    (status, value) = data

    # Let the caller know about this field's status
    self._report_status_fn(status)

    if status != constants.RS_NORMAL:
      # Abnormal fields never carry a value
      assert value is None, \
        "Found value %r for abnormal status %s" % (value, status)
      return FormatResultError(status, self._use_verbose)

    return self._format_fn(value)
1873
1874
def FormatResultError(status, verbose):
  """Formats result status other than L{constants.RS_NORMAL}.

  @param status: The result status
  @type verbose: boolean
  @param verbose: Whether to return the verbose text
  @return: Text of result status

  """
  assert status != constants.RS_NORMAL, \
    "FormatResultError called with status equal to constants.RS_NORMAL"
  if status not in constants.RSS_DESCRIPTION:
    raise NotImplementedError("Unknown status %s" % status)
  (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
  return verbose_text if verbose else normal_text
1894
1895
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
                      header=False, verbose=False):
  """Formats data in L{objects.QueryResponse}.

  @type result: L{objects.QueryResponse}
  @param result: result of query operation
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
    see L{utils.text.FormatUnit}
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to output header row
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not
  @rtype: tuple; (one of L{QR_NORMAL}, L{QR_UNKNOWN} or L{QR_INCOMPLETE},
    list of strings)
  @return: overall result status and the formatted table lines

  """
  if unit is None:
    # default unit: megabytes in separator mode, human-readable otherwise
    if separator:
      unit = "m"
    else:
      unit = "h"

  if format_override is None:
    format_override = {}

  # per-status counters, updated via the _RecordStatus closure as each
  # field is rendered by _QueryColumnFormatter
  stats = dict.fromkeys(constants.RS_ALL, 0)

  def _RecordStatus(status):
    if status in stats:
      stats[status] += 1

  columns = []
  for fdef in result.fields:
    assert fdef.title and fdef.name
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
    columns.append(TableColumn(fdef.title,
                               _QueryColumnFormatter(fn, _RecordStatus,
                                                     verbose),
                               align_right))

  # NOTE: this renders the whole table and thereby populates "stats"
  table = FormatTable(result.data, columns, header, separator)

  # Collect statistics
  assert len(stats) == len(constants.RS_ALL)
  assert compat.all(count >= 0 for count in stats.values())

  # Determine overall status. If there was no data, unknown fields must be
  # detected via the field definitions.
  if (stats[constants.RS_UNKNOWN] or
      (not result.data and _GetUnknownFields(result.fields))):
    status = QR_UNKNOWN
  elif compat.any(count > 0 for key, count in stats.items()
                  if key != constants.RS_NORMAL):
    status = QR_INCOMPLETE
  else:
    status = QR_NORMAL

  return (status, table)
1958
1959
def _GetUnknownFields(fdefs):
  """Returns list of unknown fields included in C{fdefs}.

  @type fdefs: list of L{objects.QueryFieldDefinition}
  @rtype: list
  @return: the field definitions whose kind is L{constants.QFT_UNKNOWN}

  """
  unknown = []
  for fdef in fdefs:
    if fdef.kind == constants.QFT_UNKNOWN:
      unknown.append(fdef)
  return unknown
1968
1969
def _WarnUnknownFields(fdefs):
  """Prints a warning to stderr if a query included unknown fields.

  @type fdefs: list of L{objects.QueryFieldDefinition}
  @rtype: bool
  @return: whether any unknown fields were present

  """
  unknown = _GetUnknownFields(fdefs)
  if not unknown:
    return False

  ToStderr("Warning: Queried for unknown fields %s",
           utils.CommaJoin(fdef.name for fdef in unknown))
  return True
1983
1984
def GenericList(resource, fields, names, unit, separator, header, cl=None,
                format_override=None, verbose=False, force_filter=False,
                namefield=None, qfilter=None, isnumeric=False):
  """Generic implementation for listing all items of a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type names: list of strings
  @param names: Names of items to query for; an empty list means all items
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
    None for automatic choice (human-readable for non-separator usage,
    otherwise megabytes); this is a one-letter string
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @type force_filter: bool
  @param force_filter: Whether to always treat names as filter
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not
  @type namefield: string
  @param namefield: Name of field to use for simple filters (see
    L{qlang.MakeFilter} for details)
  @type qfilter: list or None
  @param qfilter: Query filter (in addition to names)
  @param isnumeric: bool
  @param isnumeric: Whether the namefield's type is numeric, and therefore
    any simple filters built by namefield should use integer values to
    reflect that
  @rtype: int
  @return: an exit code suitable for sys.exit

  """
  # An empty name list means "no name filter at all"
  namefilter = qlang.MakeFilter(names or None, force_filter,
                                namefield=namefield, isnumeric=isnumeric)

  if qfilter is None:
    qfilter = namefilter
  elif namefilter is not None:
    # Combine the name filter with the caller-supplied one
    qfilter = [qlang.OP_AND, namefilter, qfilter]

  client = cl if cl is not None else GetClient()

  response = client.Query(resource, fields, qfilter)

  found_unknown = _WarnUnknownFields(response.fields)

  (status, rendered) = FormatQueryResult(response, unit=unit,
                                         separator=separator, header=header,
                                         format_override=format_override,
                                         verbose=verbose)

  for line in rendered:
    ToStdout(line)

  # _WarnUnknownFields and FormatQueryResult must agree on unknown fields
  assert found_unknown == (status == QR_UNKNOWN)

  if status == QR_UNKNOWN:
    return constants.EXIT_UNKNOWN_FIELD

  # TODO: Should the list command fail if not all data could be collected?
  return constants.EXIT_SUCCESS
2055
2056
def _FieldDescValues(fdef):
  """Helper function for L{GenericListFields} to get query field description.

  @type fdef: L{objects.QueryFieldDefinition}
  @rtype: list
  @return: name, human-readable kind, title and documentation of the field

  """
  # Fall back to the raw kind if no friendly name is known for it
  kind_name = _QFT_NAMES.get(fdef.kind, fdef.kind)
  return [fdef.name, kind_name, fdef.title, fdef.doc]
2070
2071
def GenericListFields(resource, fields, separator, header, cl=None):
  """Generic implementation for listing fields for a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for; an empty list means all fields
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @rtype: int
  @return: an exit code suitable for sys.exit

  """
  client = GetClient() if cl is None else cl

  # An empty field list means "all fields"
  response = client.QueryFields(resource, fields or None)

  found_unknown = _WarnUnknownFields(response.fields)

  columns = [TableColumn(title, str, False)
             for title in ("Name", "Type", "Title", "Description")]

  rows = [_FieldDescValues(fdef) for fdef in response.fields]

  for line in FormatTable(rows, columns, header, separator):
    ToStdout(line)

  if found_unknown:
    return constants.EXIT_UNKNOWN_FIELD

  return constants.EXIT_SUCCESS
2110
2111
class TableColumn(object):
  """Describes a single output column for L{FormatTable}.

  """
  def __init__(self, title, fn, align_right):
    """Initializes this class.

    @type title: string
    @param title: Column title
    @type fn: callable
    @param fn: Formatting function applied to each cell value
    @type align_right: bool
    @param align_right: Whether to align values on the right-hand side

    """
    self.align_right = align_right
    self.format = fn
    self.title = title
2130
2131
2132 def _GetColFormatString(width, align_right):
2133 """Returns the format string for a field.
2134
2135 """
2136 if align_right:
2137 sign = ""
2138 else:
2139 sign = "-"
2140
2141 return "%%%s%ss" % (sign, width)
2142
2143
def FormatTable(rows, columns, header, separator):
  """Formats data as a table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @type header: bool
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns; if None, the columns
    are instead padded with spaces to a common width
  @rtype: list of strings
  @return: the formatted table, one string per line

  """
  if header:
    data = [[col.title for col in columns]]
    colwidth = [len(col.title) for col in columns]
  else:
    data = []
    colwidth = [0] * len(columns)

  # Render every cell through its column's formatting function
  for row in rows:
    assert len(row) == len(columns)

    rendered = [col.format(value) for (value, col) in zip(row, columns)]

    if separator is None:
      # Track the widest value seen so far in each column
      colwidth = [max(oldwidth, len(value))
                  for (oldwidth, value) in zip(colwidth, rendered)]

    data.append(rendered)

  if separator is not None:
    # With an explicit separator no padding is necessary
    return [separator.join(row) for row in data]

  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line
    colwidth[-1] = 0

  # Build one big printf-style format string covering all columns
  fmt_parts = []
  for (col, width) in zip(columns, colwidth):
    sign = "" if col.align_right else "-"
    fmt_parts.append("%%%s%ss" % (sign, width))
  fmt = " ".join(fmt_parts)

  return [fmt % tuple(row) for row in data]
2191
2192
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp, or "?" when the input
    is not a two-element tuple or list

  """
  if isinstance(ts, (tuple, list)) and len(ts) == 2:
    (sec, usecs) = ts
    return utils.FormatTime(sec, usecs=usecs)

  # Anything that is not a (seconds, microseconds) pair is unknown
  return "?"
2208
2209
def ParseTimespec(value):
  """Parse a time specification.

  The following suffixes will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: day
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  @param value: the time specification, e.g. "30", "10m" or "2h"
  @rtype: int
  @return: the number of seconds represented by the specification
  @raise errors.OpPrereqError: if the value is empty or not a valid
    time specification

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed",
                               errors.ECODE_INVAL)

  # Seconds per unit for each recognized suffix
  suffix_map = {
    "s": 1,
    "m": 60,
    "h": 3600,
    "d": 86400,
    "w": 604800,
    }

  if value[-1] in suffix_map:
    multiplier = suffix_map[value[-1]]
    digits = value[:-1]
    if not digits: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)", errors.ECODE_INVAL)
  else:
    multiplier = 1
    digits = value

  try:
    # Fix: report the full original specification in the error message;
    # previously the suffix branch reported the suffix-stripped value
    # (e.g. "x" instead of "xd")
    return int(digits) * multiplier
  except (TypeError, ValueError):
    raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                               errors.ECODE_INVAL)
2253
2254
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False, nodegroup=None):
  """Returns the names of online nodes.

  This function will also log a warning on stderr with the names of
  the online nodes.

  @param nodes: if not empty, use only this subset of nodes (minus the
    offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
    offline nodes that are skipped; if this parameter is True the
    note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
    names, useful for doing network traffic over the replication interface
    (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
    (useful in coordination with secondary_ips where we cannot check our
    node name against the list)
  @type nodegroup: string
  @param nodegroup: If set, only return nodes in this node group
  @rtype: list
  @return: the names (or secondary IPs) of the online nodes

  """
  if cl is None:
    cl = GetClient()

  # Collect the individual criteria; they are AND-ed together below if
  # more than one is given
  qfilter = []

  if nodes:
    qfilter.append(qlang.MakeSimpleFilter("name", nodes))

  if nodegroup is not None:
    # The node group may be given either by name or by UUID
    qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
                    [qlang.OP_EQUAL, "group.uuid", nodegroup]])

  if filter_master:
    qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])

  if qfilter:
    if len(qfilter) > 1:
      final_filter = [qlang.OP_AND] + qfilter
    else:
      assert len(qfilter) == 1
      final_filter = qfilter[0]
  else:
    final_filter = None

  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)

  # Each field in a result row comes back as a two-element tuple whose
  # second element is the value (presumably (status, value) — the helpers
  # below unpack the fields positionally in query order)
  def _IsOffline(row):
    (_, (_, offline), _) = row
    return offline

  def _GetName(row):
    ((_, name), _, _) = row
    return name

  def _GetSip(row):
    (_, _, (_, sip)) = row
    return sip

  (offline, online) = compat.partition(result.data, _IsOffline)

  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" %
             utils.CommaJoin(map(_GetName, offline)))

  if secondary_ips:
    fn = _GetSip
  else:
    fn = _GetName

  return map(fn, online)
2331
2332
def GetNodesSshPorts(nodes, cl):
  """Retrieves SSH ports of given nodes.

  @param nodes: the names of nodes
  @type nodes: a list of strings
  @param cl: a client to use for the query
  @type cl: L{ganeti.luxi.Client}
  @return: the list of SSH ports corresponding to the nodes
  @rtype: a list of ints

  """
  # Each query row holds a single field; extract its value
  result = cl.QueryNodes(names=nodes, fields=["ndp/ssh_port"],
                         use_locking=False)
  return [row[0] for row in result]
2347
2348
def GetNodeUUIDs(nodes, cl):
  """Retrieves the UUIDs of given nodes.

  @param nodes: the names of nodes
  @type nodes: a list of string
  @param cl: a client to use for the query
  @type cl: L{ganeti.luxi.Client}
  @return: the list of UUIDs corresponding to the nodes
  @rtype: a list of strings

  """
  # Each query row holds a single field; extract its value
  result = cl.QueryNodes(names=nodes, fields=["uuid"], use_locking=False)
  return [row[0] for row in result]
2363
2364
def _ToStream(stream, txt, *args):
  """Write a message to a stream, bypassing the logging system

  A trailing newline is appended and the stream is flushed. A broken pipe
  (EPIPE) terminates the process instead of raising.

  @type stream: file object
  @param stream: the file to which we should write
  @type txt: str
  @param txt: the message
  @param args: optional arguments interpolated into C{txt} with the "%"
    operator

  """
  try:
    if args:
      args = tuple(args)
      stream.write(txt % args)
    else:
      stream.write(txt)
    stream.write("\n")
    stream.flush()
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
    else:
      raise
2388
2389
def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message
  @param args: optional "%" interpolation arguments for C{txt}

  """
  _ToStream(sys.stdout, txt, *args)
2400
2401
def ToStdoutAndLoginfo(txt, *args):
  """Write a message to stdout and additionally log it at INFO level

  @type txt: str
  @param txt: the message
  @param args: optional "%" interpolation arguments for C{txt}

  """
  ToStdout(txt, *args)
  logging.info(txt, *args)
2406
2407
def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message
  @param args: optional "%" interpolation arguments for C{txt}

  """
  _ToStream(sys.stderr, txt, *args)
2418
2419
class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    """Initializes this class.

    @param cl: if not None, luxi client to use
    @type verbose: boolean
    @param verbose: whether to announce submitted job IDs on stdout
    @param opts: command-line options applied to all queued opcodes via
      L{SetGenericOpcodeOpts}
    @param feedback_fn: feedback function passed to L{PollJob}

    """
    # Jobs not yet submitted, as (index, name, opcodes) tuples
    self.queue = []
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    # Submitted jobs, as (index, submit status, job id, name) tuples
    self.jobs = []
    self.opts = opts
    self.feedback_fn = feedback_fn
    # Monotonic counter; the index it yields restores submission order
    # when GetResults sorts the collected results
    self._counter = itertools.count()

  @staticmethod
  def _IfName(name, fmt):
    """Helper function for formatting name.

    @param name: the name to format, may be empty/None
    @param fmt: format string containing a single "%s" placeholder
    @return: C{fmt % name} if C{name} is set, otherwise an empty string

    """
    if name:
      return fmt % name

    return ""

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet

    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((self._counter.next(), name, ops))

  def AddJobId(self, name, status, job_id):
    """Adds a job ID to the internal queue.

    @param name: a description of the job
    @param status: the submission status of the job
    @param job_id: the ID of the already-submitted job

    """
    self.jobs.append((self._counter.next(), status, job_id, name))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    @type each: boolean
    @param each: if True, submit the queued jobs one by one instead of
      in a single SubmitManyJobs call

    """
    if each:
      results = []
      for (_, _, ops) in self.queue:
        # SubmitJob will remove the success status, but raise an exception if
        # the submission fails, so we'll notice that anyway.
        results.append([True, self.cl.SubmitJob(ops)[0]])
    else:
      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
    # Pair each submission result with its queue entry; order is preserved
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    The chosen job is removed from L{self.jobs} and returned.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    # Only query a bounded batch of jobs at a time
    result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
                               ["status"])
    assert result

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITING,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        continue
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      return job_data

    # no job found
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost, err:
        _, job_result = FormatError(err)
        ToStderr("Job %s%s has been archived, cannot check its result",
                 jid, self._IfName(name, " for %s"))
        success = False
      except (errors.GenericError, rpcerr.ProtocolError), err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s%s has failed: %s",
                 jid, self._IfName(name, " for %s"), job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not
    @return: job results as for L{GetResults} when waiting, otherwise a
      list of (status, job id or error) pairs for the submitted jobs

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for _, status, result, name in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)
      return [row[1:3] for row in self.jobs]
2570
2571
def FormatParamsDictInfo(param_dict, actual, roman=False):
  """Formats a parameter dictionary.

  @type param_dict: dict
  @param param_dict: the own parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @type roman: bool
  @param roman: whether to convert the values to roman numerals
  @rtype: dict
  @return: dictionary where the value of each parameter is either a fully
      formatted string or a dictionary containing formatted strings

  """
  ret = {}
  for (key, data) in actual.items():
    if isinstance(data, dict) and data:
      # Non-empty sub-dictionaries are formatted recursively
      ret[key] = FormatParamsDictInfo(param_dict.get(key, {}), data, roman)
      continue
    # Scalar values: show the explicitly set value, or mark the
    # effective value as a default
    fallback = "default (%s)" % compat.TryToRoman(data, roman)
    ret[key] = str(compat.TryToRoman(param_dict.get(key, fallback), roman))
  return ret
2592
2593
def _FormatListInfoDefault(data, def_data):
  """Formats a list of values, falling back to a marked default list.

  @param data: the explicitly configured values, or None if unset
  @param def_data: the effective (default) values, used when C{data} is None
  @rtype: string

  """
  if data is None:
    return "default (%s)" % utils.CommaJoin(def_data)
  return utils.CommaJoin(data)
2600
2601
def FormatPolicyInfo(custom_ipolicy, eff_ipolicy, iscluster, roman=False):
  """Formats an instance policy.

  @type custom_ipolicy: dict
  @param custom_ipolicy: own policy
  @type eff_ipolicy: dict
  @param eff_ipolicy: effective policy (including defaults); ignored for
      cluster
  @type iscluster: bool
  @param iscluster: the policy is at cluster level
  @type roman: bool
  @param roman: whether to print the values in roman numerals
  @rtype: list of pairs
  @return: formatted data, suitable for L{PrintGenericInfo}

  """
  if iscluster:
    # The cluster policy is complete by definition, so it serves as its
    # own effective policy
    eff_ipolicy = custom_ipolicy

  minmax_out = []
  custom_minmax = custom_ipolicy.get(constants.ISPECS_MINMAX)
  if custom_minmax:
    for (k, minmax) in enumerate(custom_minmax):
      minmax_out.append([
        ("%s/%s" % (key, k),
         FormatParamsDictInfo(minmax[key], minmax[key], roman))
        for key in constants.ISPECS_MINMAX_KEYS
        ])
  else:
    # No custom bounds: show the effective ones, all marked as defaults
    # (empty own-parameter dict)
    for (k, minmax) in enumerate(eff_ipolicy[constants.ISPECS_MINMAX]):
      minmax_out.append([
        ("%s/%s" % (key, k),
         FormatParamsDictInfo({}, minmax[key], roman))
        for key in constants.ISPECS_MINMAX_KEYS
        ])
  ret = [("bounds specs", minmax_out)]

  if iscluster:
    # The "std" specs only exist at cluster level
    stdspecs = custom_ipolicy[constants.ISPECS_STD]
    ret.append(
      (constants.ISPECS_STD,
       FormatParamsDictInfo(stdspecs, stdspecs, roman))
      )

  ret.append(
    ("allowed disk templates",
     _FormatListInfoDefault(custom_ipolicy.get(constants.IPOLICY_DTS),
                            eff_ipolicy[constants.IPOLICY_DTS]))
    )
  to_roman = compat.TryToRoman
  ret.extend([
    (key, str(to_roman(custom_ipolicy.get(key,
                                          "default (%s)" % eff_ipolicy[key]),
                       roman)))
    for key in constants.IPOLICY_PARAMETERS
    ])
  return ret
2659
2660
2661 def _PrintSpecsParameters(buf, specs):
2662 values = ("%s=%s" % (par, val) for (par, val) in sorted(specs.items()))
2663 buf.write(",".join(values))
2664
2665
def PrintIPolicyCommand(buf, ipolicy, isgroup):
  """Print the command option used to generate the given instance policy.

  Currently only the parts dealing with specs are supported.

  @type buf: StringIO
  @param buf: stream to write into
  @type ipolicy: dict
  @param ipolicy: instance policy
  @type isgroup: bool
  @param isgroup: whether the policy is at group level

  """
  # The "std" specs only exist at cluster level
  if not isgroup:
    stdspecs = ipolicy.get("std")
    if stdspecs:
      buf.write(" %s " % IPOLICY_STD_SPECS_STR)
      _PrintSpecsParameters(buf, stdspecs)

  # Every complete min/max pair is emitted; pairs after the first are
  # separated by "//"
  first = True
  for minmax in ipolicy.get("minmax", []):
    minspecs = minmax.get("min")
    maxspecs = minmax.get("max")
    if not (minspecs and maxspecs):
      continue
    if first:
      buf.write(" %s " % IPOLICY_BOUNDS_SPECS_STR)
      first = False
    else:
      buf.write("//")
    buf.write("min:")
    _PrintSpecsParameters(buf, minspecs)
    buf.write("/max:")
    _PrintSpecsParameters(buf, maxspecs)
2699
2700
def ConfirmOperation(names, list_type, text, extra=""):
  """Ask the user to confirm an operation on a list of list_type.

  This function is used to request confirmation for doing an operation
  on a given list of list_type.

  @type names: list
  @param names: the list of names that we display when
      we ask for confirmation
  @type list_type: str
  @param list_type: Human readable name for elements in the list (e.g. nodes)
  @type text: str
  @param text: the operation that the user should confirm
  @type extra: str
  @param extra: extra text inserted before the confirmation question
  @rtype: boolean
  @return: True or False depending on user's confirmation.

  """
  count = len(names)
  msg = ("The %s will operate on %d %s.\n%s"
         "Do you want to continue?" % (text, count, list_type, extra))
  affected = (("\nAffected %s:\n" % list_type) +
              "\n".join([" %s" % name for name in names]))

  choices = [("y", True, "Yes, execute the %s" % text),
             ("n", False, "No, abort the %s" % text)]

  if count > 20:
    # For long lists, offer a "view" choice instead of dumping all names
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
    question = msg
  else:
    question = msg + affected

  choice = AskUser(question, choices)
  if choice == "v":
    # The user asked to see the list: show it and ask again, this time
    # without the "view" option
    choices.pop(1)
    choice = AskUser(msg + affected, choices)
  return choice
2738
2739
def _MaybeParseUnit(elements):
  """Parses and returns an array of potential values with units.

  Values equal to C{constants.VALUE_DEFAULT} are passed through untouched;
  everything else is run through L{utils.ParseUnit}.

  @type elements: dict
  @param elements: mapping of names to values to parse
  @rtype: dict

  """
  return dict((k, v if v == constants.VALUE_DEFAULT else utils.ParseUnit(v))
              for (k, v) in elements.items())
2751
2752
def _InitISpecsFromSplitOpts(ipolicy, ispecs_mem_size, ispecs_cpu_count,
                             ispecs_disk_count, ispecs_disk_size,
                             ispecs_nic_count, group_ipolicy, fill_all):
  """Fills in the instance specs of a policy from the legacy split options.

  Each C{ispecs_*} argument is a dict keyed by min/max/std (e.g. as given
  by the --specs-mem-size option).

  @type ipolicy: dict
  @param ipolicy: policy object, updated in place
  @type group_ipolicy: bool
  @param group_ipolicy: whether this is a group-level policy
  @type fill_all: bool
  @param fill_all: whether missing spec values should be filled from the
    built-in defaults
  @raise errors.OpPrereqError: if a size value cannot be parsed

  """
  try:
    # Memory and disk sizes may carry a unit suffix and need parsing
    if ispecs_mem_size:
      ispecs_mem_size = _MaybeParseUnit(ispecs_mem_size)
    if ispecs_disk_size:
      ispecs_disk_size = _MaybeParseUnit(ispecs_disk_size)
  except (TypeError, ValueError, errors.UnitParseError), err:
    raise errors.OpPrereqError("Invalid disk (%s) or memory (%s) size"
                               " in policy: %s" %
                               (ispecs_disk_size, ispecs_mem_size, err),
                               errors.ECODE_INVAL)

  # prepare ipolicy dict
  ispecs_transposed = {
    constants.ISPEC_MEM_SIZE: ispecs_mem_size,
    constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
    constants.ISPEC_DISK_COUNT: ispecs_disk_count,
    constants.ISPEC_DISK_SIZE: ispecs_disk_size,
    constants.ISPEC_NIC_COUNT: ispecs_nic_count,
    }

  # first, check that the values given are correct
  if group_ipolicy:
    forced_type = TISPECS_GROUP_TYPES
  else:
    forced_type = TISPECS_CLUSTER_TYPES
  for specs in ispecs_transposed.values():
    assert isinstance(specs, dict)
    utils.ForceDictType(specs, forced_type)

  # then transpose
  ispecs = {
    constants.ISPECS_MIN: {},
    constants.ISPECS_MAX: {},
    constants.ISPECS_STD: {},
    }
  for (name, specs) in ispecs_transposed.iteritems():
    assert name in constants.ISPECS_PARAMETERS
    for key, val in specs.items(): # {min: .. ,max: .., std: ..}
      assert key in ispecs
      ispecs[key][name] = val
  minmax_out = {}
  for key in constants.ISPECS_MINMAX_KEYS:
    if fill_all:
      minmax_out[key] = \
        objects.FillDict(constants.ISPECS_MINMAX_DEFAULTS[key], ispecs[key])
    else:
      minmax_out[key] = ispecs[key]
  ipolicy[constants.ISPECS_MINMAX] = [minmax_out]
  if fill_all:
    ipolicy[constants.ISPECS_STD] = \
      objects.FillDict(constants.IPOLICY_DEFAULTS[constants.ISPECS_STD],
                       ispecs[constants.ISPECS_STD])
  else:
    ipolicy[constants.ISPECS_STD] = ispecs[constants.ISPECS_STD]
2810
2811
def _ParseSpecUnit(spec, keyname):
  """Parses the size values of an instance specification.

  Disk and memory size entries are converted via L{utils.ParseUnit}; the
  input dictionary is not modified.

  @type spec: dict
  @param spec: the instance specification
  @type keyname: string
  @param keyname: name of the spec, used in error messages
  @rtype: dict
  @return: a copy of C{spec} with parsed size values
  @raise errors.OpPrereqError: if a size value cannot be parsed

  """
  ret = spec.copy()
  for k in [constants.ISPEC_DISK_SIZE, constants.ISPEC_MEM_SIZE]:
    if k in ret:
      try:
        ret[k] = utils.ParseUnit(ret[k])
      except (TypeError, ValueError, errors.UnitParseError), err:
        raise errors.OpPrereqError(("Invalid parameter %s (%s) in %s instance"
                                    " specs: %s" % (k, ret[k], keyname, err)),
                                   errors.ECODE_INVAL)
  return ret
2823
2824
def _ParseISpec(spec, keyname, required):
  """Parses and validates a single instance specification dictionary.

  @type spec: dict
  @param spec: the instance specification to parse
  @type keyname: string
  @param keyname: name of the spec, used in error messages
  @type required: bool
  @param required: whether all parameters in
      L{constants.ISPECS_PARAMETERS} must be present
  @rtype: dict
  @raise errors.OpPrereqError: if a value is invalid or a required
      parameter is missing

  """
  ret = _ParseSpecUnit(spec, keyname)
  utils.ForceDictType(ret, constants.ISPECS_PARAMETER_TYPES)
  # Determine which of the known parameters were not supplied
  missing = constants.ISPECS_PARAMETERS - frozenset(ret.keys())
  if required and missing:
    raise errors.OpPrereqError("Missing parameters in ipolicy spec %s: %s" %
                               (keyname, utils.CommaJoin(missing)),
                               errors.ECODE_INVAL)
  return ret
2834
2835
2836 def _GetISpecsInAllowedValues(minmax_ispecs, allowed_values):
2837 ret = None
2838 if (minmax_ispecs and allowed_values and len(minmax_ispecs) == 1 and
2839 len(minmax_ispecs[0]) == 1):
2840 for (key, spec) in minmax_ispecs[0].items():
2841 # This loop is executed exactly once
2842 if key in allowed_values and not spec:
2843 ret = key
2844 return ret
2845
2846
def _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
                            group_ipolicy, allowed_values):
  """Fills in the min/max and std instance specs of a policy.

  @type ipolicy_out: dict
  @param ipolicy_out: policy object, updated in place
  @param minmax_ispecs: list of min/max spec dictionaries, or None
  @param std_ispecs: dictionary of standard specs, or None
  @type group_ipolicy: bool
  @param group_ipolicy: whether this is a group-level policy
  @param allowed_values: placeholder values accepted verbatim
  @raise errors.OpPrereqError: on invalid spec keys or values

  """
  found_allowed = _GetISpecsInAllowedValues(minmax_ispecs, allowed_values)
  if found_allowed is not None:
    # A bare placeholder (e.g. "default") replaces the whole minmax list
    ipolicy_out[constants.ISPECS_MINMAX] = found_allowed
  elif minmax_ispecs is not None:
    minmax_out = []
    for mmpair in minmax_ispecs:
      mmpair_out = {}
      for (key, spec) in mmpair.items():
        if key not in constants.ISPECS_MINMAX_KEYS:
          msg = "Invalid key in bounds instance specifications: %s" % key
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
        mmpair_out[key] = _ParseISpec(spec, key, True)
      minmax_out.append(mmpair_out)
    ipolicy_out[constants.ISPECS_MINMAX] = minmax_out
  if std_ispecs is not None:
    assert not group_ipolicy # This is not an option for gnt-group
    ipolicy_out[constants.ISPECS_STD] = _ParseISpec(std_ispecs, "std", False)
2866
2867
def CreateIPolicyFromOpts(ispecs_mem_size=None,
                          ispecs_cpu_count=None,
                          ispecs_disk_count=None,
                          ispecs_disk_size=None,
                          ispecs_nic_count=None,
                          minmax_ispecs=None,
                          std_ispecs=None,
                          ipolicy_disk_templates=None,
                          ipolicy_vcpu_ratio=None,
                          ipolicy_spindle_ratio=None,
                          ipolicy_memory_ratio=None,
                          group_ipolicy=False,
                          allowed_values=None,
                          fill_all=False):
  """Creation of instance policy based on command line options.

  The C{ispecs_*} parameters carry the legacy --specs-xxx options; they
  are mutually exclusive with C{minmax_ispecs}/C{std_ispecs}
  (--ipolicy-xxx-specs).

  @type group_ipolicy: bool
  @param group_ipolicy: whether the policy is created at group level
  @param allowed_values: placeholder values accepted verbatim instead of
    being parsed
  @param fill_all: whether for cluster policies we should ensure that
    all values are filled
  @rtype: dict
  @return: the new instance policy
  @raise errors.OpPrereqError: on conflicting or invalid options

  """
  assert not (fill_all and allowed_values)

  # The legacy split options and the newer full-spec options are
  # mutually exclusive
  split_specs = (ispecs_mem_size or ispecs_cpu_count or ispecs_disk_count or
                 ispecs_disk_size or ispecs_nic_count)
  if split_specs and (minmax_ispecs is not None or std_ispecs is not None):
    raise errors.OpPrereqError("A --specs-xxx option cannot be specified"
                               " together with any --ipolicy-xxx-specs option",
                               errors.ECODE_INVAL)

  ipolicy_out = objects.MakeEmptyIPolicy()
  if split_specs:
    assert fill_all
    _InitISpecsFromSplitOpts(ipolicy_out, ispecs_mem_size, ispecs_cpu_count,
                             ispecs_disk_count, ispecs_disk_size,
                             ispecs_nic_count, group_ipolicy, fill_all)
  elif minmax_ispecs is not None or std_ispecs is not None:
    _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
                            group_ipolicy, allowed_values)

  if ipolicy_disk_templates is not None:
    if allowed_values and ipolicy_disk_templates in allowed_values:
      # Keep the placeholder value (e.g. "default") verbatim
      ipolicy_out[constants.IPOLICY_DTS] = ipolicy_disk_templates
    else:
      ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
  if ipolicy_vcpu_ratio is not None:
    ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio
  if ipolicy_spindle_ratio is not None:
    ipolicy_out[constants.IPOLICY_SPINDLE_RATIO] = ipolicy_spindle_ratio
  if ipolicy_memory_ratio is not None:
    ipolicy_out[constants.IPOLICY_MEMORY_RATIO] = ipolicy_memory_ratio

  assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS)

  if not group_ipolicy and fill_all:
    # A complete cluster policy: fill unspecified values with the defaults
    ipolicy_out = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, ipolicy_out)

  return ipolicy_out
2925
2926
2927 def _NotAContainer(data):
2928 """ Checks whether the input is not a container data type.
2929
2930 @rtype: bool
2931
2932 """
2933 return not isinstance(data, (list, dict, tuple))
2934
2935
2936 def _GetAlignmentMapping(data):
2937 """ Returns info about alignment if present in an encoded ordered dictionary.
2938
2939 @type data: list of tuple
2940 @param data: The encoded ordered dictionary, as defined in
2941 L{_SerializeGenericInfo}.
2942 @rtype: dict of any to int
2943 @return: The dictionary mapping alignment groups to the maximum length of the
2944 dictionary key found in the group.
2945
2946 """
2947 alignment_map = {}
2948 for entry in data:
2949 if len(entry) > 2:
2950 group_key = entry[2]
2951 key_length = len(entry[0])
2952 if group_key in alignment_map:
2953 alignment_map[group_key] = max(alignment_map[group_key], key_length)
2954 else:
2955 alignment_map[group_key] = key_length
2956
2957 return alignment_map
2958
2959
def _SerializeGenericInfo(buf, data, level, afterkey=False):
  """Formatting core of L{PrintGenericInfo}.

  Recursively serializes C{data} (dicts, ordered lists of tuples, lists,
  tuples and scalars) into YAML-like text.

  @param buf: (string) stream to accumulate the result into
  @param data: data to format
  @type level: int
  @param level: depth in the data hierarchy, used for indenting
  @type afterkey: bool
  @param afterkey: True when we are in the middle of a line after a key (used
    to properly add newlines or indentation)

  """
  baseind = "  "
  if isinstance(data, dict):
    if not data:
      buf.write("\n")
    else:
      if afterkey:
        buf.write("\n")
        doindent = True
      else:
        doindent = False
      # Plain dictionaries are emitted with sorted keys
      for key in sorted(data):
        if doindent:
          buf.write(baseind * level)
        else:
          doindent = True
        buf.write(key)
        buf.write(": ")
        _SerializeGenericInfo(buf, data[key], level + 1, afterkey=True)
  elif isinstance(data, list) and len(data) > 0 and isinstance(data[0], tuple):
    # list of tuples (an ordered dictionary)
    # the tuples may have two or three members - key, value, and alignment group
    # if the alignment group is present, align all values sharing the same group
    if afterkey:
      buf.write("\n")
      doindent = True
    else:
      doindent = False

    alignment_mapping = _GetAlignmentMapping(data)
    for entry in data:
      key, val = entry[0:2]
      if doindent:
        buf.write(baseind * level)
      else:
        doindent = True
      buf.write(key)
      buf.write(": ")
      if len(entry) > 2:
        # Pad so that all values in this alignment group line up
        max_key_length = alignment_mapping[entry[2]]
        buf.write(" " * (max_key_length - len(key)))
      _SerializeGenericInfo(buf, val, level + 1, afterkey=True)
  elif isinstance(data, tuple) and all(map(_NotAContainer, data)):
    # tuples with simple content are serialized as inline lists
    buf.write("[%s]\n" % utils.CommaJoin(data))
  elif isinstance(data, list) or isinstance(data, tuple):
    # lists and tuples
    if not data:
      buf.write("\n")
    else:
      if afterkey:
        buf.write("\n")
        doindent = True
      else:
        doindent = False
      # Each item becomes a "- item" line, recursing for nested content
      for item in data:
        if doindent:
          buf.write(baseind * level)
        else:
          doindent = True
        buf.write("-")
        buf.write(baseind[1:])
        _SerializeGenericInfo(buf, item, level + 1)
  else:
    # This branch should be only taken for strings, but it's practically
    # impossible to guarantee that no other types are produced somewhere
    buf.write(str(data))
    buf.write("\n")
3039
3040
def PrintGenericInfo(data):
  """Print information formatted according to the hierarchy.

  The output is a valid YAML string.

  @param data: the data to print. It's a hierarchical structure whose elements
    can be:
      - dictionaries, where keys are strings and values are of any of the
        types listed here
      - lists of tuples (key, value) or (key, value, alignment_group), where
        key is a string, value is of any of the types listed here, and
        alignment_group can be any hashable value; it's a way to encode
        ordered dictionaries; any entries sharing the same alignment group are
        aligned by appending whitespace before the value as needed
      - lists of any of the types listed here
      - strings

  """
  output = StringIO()
  _SerializeGenericInfo(output, data, 0)
  # Drop trailing newlines; ToStdout appends exactly one itself
  ToStdout(output.getvalue().rstrip("\n"))