Add tags in network objects
[ganeti-github.git] / lib / cli.py
1 #
2 #
3
4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc.
5 #
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
10 #
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
15 #
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 # 02110-1301, USA.
20
21
22 """Module dealing with command line parsing"""
23
24
25 import sys
26 import textwrap
27 import os.path
28 import time
29 import logging
30 import errno
31 import itertools
32 import shlex
33 from cStringIO import StringIO
34
35 from ganeti import utils
36 from ganeti import errors
37 from ganeti import constants
38 from ganeti import opcodes
39 from ganeti import luxi
40 from ganeti import ssconf
41 from ganeti import rpc
42 from ganeti import ssh
43 from ganeti import compat
44 from ganeti import netutils
45 from ganeti import qlang
46 from ganeti import objects
47 from ganeti import pathutils
48
49 from optparse import (OptionParser, TitledHelpFormatter,
50 Option, OptionValueError)
51
52
53 __all__ = [
54 # Command line options
55 "ABSOLUTE_OPT",
56 "ADD_UIDS_OPT",
57 "ADD_RESERVED_IPS_OPT",
58 "ALLOCATABLE_OPT",
59 "ALLOC_POLICY_OPT",
60 "ALL_OPT",
61 "ALLOW_FAILOVER_OPT",
62 "AUTO_PROMOTE_OPT",
63 "AUTO_REPLACE_OPT",
64 "BACKEND_OPT",
65 "BLK_OS_OPT",
66 "CAPAB_MASTER_OPT",
67 "CAPAB_VM_OPT",
68 "CLEANUP_OPT",
69 "CLUSTER_DOMAIN_SECRET_OPT",
70 "CONFIRM_OPT",
71 "CP_SIZE_OPT",
72 "DEBUG_OPT",
73 "DEBUG_SIMERR_OPT",
74 "DISKIDX_OPT",
75 "DISK_OPT",
76 "DISK_PARAMS_OPT",
77 "DISK_TEMPLATE_OPT",
78 "DRAINED_OPT",
79 "DRY_RUN_OPT",
80 "DRBD_HELPER_OPT",
81 "DST_NODE_OPT",
82 "EARLY_RELEASE_OPT",
83 "ENABLED_HV_OPT",
84 "ERROR_CODES_OPT",
85 "FIELDS_OPT",
86 "FILESTORE_DIR_OPT",
87 "FILESTORE_DRIVER_OPT",
88 "FORCE_FILTER_OPT",
89 "FORCE_OPT",
90 "FORCE_VARIANT_OPT",
91 "GATEWAY_OPT",
92 "GATEWAY6_OPT",
93 "GLOBAL_FILEDIR_OPT",
94 "HID_OS_OPT",
95 "GLOBAL_SHARED_FILEDIR_OPT",
96 "HVLIST_OPT",
97 "HVOPTS_OPT",
98 "HYPERVISOR_OPT",
99 "IALLOCATOR_OPT",
100 "DEFAULT_IALLOCATOR_OPT",
101 "IDENTIFY_DEFAULTS_OPT",
102 "IGNORE_CONSIST_OPT",
103 "IGNORE_ERRORS_OPT",
104 "IGNORE_FAILURES_OPT",
105 "IGNORE_OFFLINE_OPT",
106 "IGNORE_REMOVE_FAILURES_OPT",
107 "IGNORE_SECONDARIES_OPT",
108 "IGNORE_SIZE_OPT",
109 "INTERVAL_OPT",
110 "MAC_PREFIX_OPT",
111 "MAINTAIN_NODE_HEALTH_OPT",
112 "MASTER_NETDEV_OPT",
113 "MASTER_NETMASK_OPT",
114 "MC_OPT",
115 "MIGRATION_MODE_OPT",
116 "NET_OPT",
117 "NETWORK_OPT",
118 "NETWORK6_OPT",
119 "NETWORK_TYPE_OPT",
120 "NEW_CLUSTER_CERT_OPT",
121 "NEW_CLUSTER_DOMAIN_SECRET_OPT",
122 "NEW_CONFD_HMAC_KEY_OPT",
123 "NEW_RAPI_CERT_OPT",
124 "NEW_SECONDARY_OPT",
125 "NEW_SPICE_CERT_OPT",
126 "NIC_PARAMS_OPT",
127 "NOCONFLICTSCHECK_OPT",
128 "NODE_FORCE_JOIN_OPT",
129 "NODE_LIST_OPT",
130 "NODE_PLACEMENT_OPT",
131 "NODEGROUP_OPT",
132 "NODE_PARAMS_OPT",
133 "NODE_POWERED_OPT",
134 "NODRBD_STORAGE_OPT",
135 "NOHDR_OPT",
136 "NOIPCHECK_OPT",
137 "NO_INSTALL_OPT",
138 "NONAMECHECK_OPT",
139 "NOLVM_STORAGE_OPT",
140 "NOMODIFY_ETCHOSTS_OPT",
141 "NOMODIFY_SSH_SETUP_OPT",
142 "NONICS_OPT",
143 "NONLIVE_OPT",
144 "NONPLUS1_OPT",
145 "NORUNTIME_CHGS_OPT",
146 "NOSHUTDOWN_OPT",
147 "NOSTART_OPT",
148 "NOSSH_KEYCHECK_OPT",
149 "NOVOTING_OPT",
150 "NO_REMEMBER_OPT",
151 "NWSYNC_OPT",
152 "OFFLINE_INST_OPT",
153 "ONLINE_INST_OPT",
154 "ON_PRIMARY_OPT",
155 "ON_SECONDARY_OPT",
156 "OFFLINE_OPT",
157 "OSPARAMS_OPT",
158 "OS_OPT",
159 "OS_SIZE_OPT",
160 "OOB_TIMEOUT_OPT",
161 "POWER_DELAY_OPT",
162 "PREALLOC_WIPE_DISKS_OPT",
163 "PRIMARY_IP_VERSION_OPT",
164 "PRIMARY_ONLY_OPT",
165 "PRIORITY_OPT",
166 "RAPI_CERT_OPT",
167 "READD_OPT",
168 "REBOOT_TYPE_OPT",
169 "REMOVE_INSTANCE_OPT",
170 "REMOVE_RESERVED_IPS_OPT",
171 "REMOVE_UIDS_OPT",
172 "RESERVED_LVS_OPT",
173 "RUNTIME_MEM_OPT",
174 "ROMAN_OPT",
175 "SECONDARY_IP_OPT",
176 "SECONDARY_ONLY_OPT",
177 "SELECT_OS_OPT",
178 "SEP_OPT",
179 "SHOWCMD_OPT",
180 "SHOW_MACHINE_OPT",
181 "SHUTDOWN_TIMEOUT_OPT",
182 "SINGLE_NODE_OPT",
183 "SPECS_CPU_COUNT_OPT",
184 "SPECS_DISK_COUNT_OPT",
185 "SPECS_DISK_SIZE_OPT",
186 "SPECS_MEM_SIZE_OPT",
187 "SPECS_NIC_COUNT_OPT",
188 "IPOLICY_DISK_TEMPLATES",
189 "IPOLICY_VCPU_RATIO",
190 "SPICE_CACERT_OPT",
191 "SPICE_CERT_OPT",
192 "SRC_DIR_OPT",
193 "SRC_NODE_OPT",
194 "SUBMIT_OPT",
195 "STARTUP_PAUSED_OPT",
196 "STATIC_OPT",
197 "SYNC_OPT",
198 "TAG_ADD_OPT",
199 "TAG_SRC_OPT",
200 "TIMEOUT_OPT",
201 "TO_GROUP_OPT",
202 "UIDPOOL_OPT",
203 "USEUNITS_OPT",
204 "USE_EXTERNAL_MIP_SCRIPT",
205 "USE_REPL_NET_OPT",
206 "VERBOSE_OPT",
207 "VG_NAME_OPT",
208 "WFSYNC_OPT",
209 "YES_DOIT_OPT",
210 "DISK_STATE_OPT",
211 "HV_STATE_OPT",
212 "IGNORE_IPOLICY_OPT",
213 "INSTANCE_POLICY_OPTS",
214 # Generic functions for CLI programs
215 "ConfirmOperation",
216 "CreateIPolicyFromOpts",
217 "GenericMain",
218 "GenericInstanceCreate",
219 "GenericList",
220 "GenericListFields",
221 "GetClient",
222 "GetOnlineNodes",
223 "JobExecutor",
224 "JobSubmittedException",
225 "ParseTimespec",
226 "RunWhileClusterStopped",
227 "SubmitOpCode",
228 "SubmitOrSend",
229 "UsesRPC",
230 # Formatting functions
231 "ToStderr", "ToStdout",
232 "FormatError",
233 "FormatQueryResult",
234 "FormatParameterDict",
235 "GenerateTable",
236 "AskUser",
237 "FormatTimestamp",
238 "FormatLogMessage",
239 # Tags functions
240 "ListTags",
241 "AddTags",
242 "RemoveTags",
243 # command line options support infrastructure
244 "ARGS_MANY_INSTANCES",
245 "ARGS_MANY_NODES",
246 "ARGS_MANY_GROUPS",
247 "ARGS_MANY_NETWORKS",
248 "ARGS_NONE",
249 "ARGS_ONE_INSTANCE",
250 "ARGS_ONE_NODE",
251 "ARGS_ONE_GROUP",
252 "ARGS_ONE_OS",
253 "ARGS_ONE_NETWORK",
254 "ArgChoice",
255 "ArgCommand",
256 "ArgFile",
257 "ArgGroup",
258 "ArgHost",
259 "ArgInstance",
260 "ArgJobId",
261 "ArgNetwork",
262 "ArgNode",
263 "ArgOs",
264 "ArgSuggest",
265 "ArgUnknown",
266 "OPT_COMPL_INST_ADD_NODES",
267 "OPT_COMPL_MANY_NODES",
268 "OPT_COMPL_ONE_IALLOCATOR",
269 "OPT_COMPL_ONE_INSTANCE",
270 "OPT_COMPL_ONE_NODE",
271 "OPT_COMPL_ONE_NODEGROUP",
272 "OPT_COMPL_ONE_NETWORK",
273 "OPT_COMPL_ONE_OS",
274 "cli_option",
275 "SplitNodeOption",
276 "CalculateOSNames",
277 "ParseFields",
278 "COMMON_CREATE_OPTS",
279 ]
280
281 NO_PREFIX = "no_"
282 UN_PREFIX = "-"
283
284 #: Priorities (sorted)
285 _PRIORITY_NAMES = [
286 ("low", constants.OP_PRIO_LOW),
287 ("normal", constants.OP_PRIO_NORMAL),
288 ("high", constants.OP_PRIO_HIGH),
289 ]
290
291 #: Priority dictionary for easier lookup
292 # TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
293 # we migrate to Python 2.6
294 _PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)
295
296 # Query result status for clients
297 (QR_NORMAL,
298 QR_UNKNOWN,
299 QR_INCOMPLETE) = range(3)
300
301 #: Maximum batch size for ChooseJob
302 _CHOOSE_BATCH = 25
303
304
305 # constants used to create InstancePolicy dictionary
306 TISPECS_GROUP_TYPES = {
307 constants.ISPECS_MIN: constants.VTYPE_INT,
308 constants.ISPECS_MAX: constants.VTYPE_INT,
309 }
310
311 TISPECS_CLUSTER_TYPES = {
312 constants.ISPECS_MIN: constants.VTYPE_INT,
313 constants.ISPECS_MAX: constants.VTYPE_INT,
314 constants.ISPECS_STD: constants.VTYPE_INT,
315 }
316
317
318 class _Argument:
319 def __init__(self, min=0, max=None): # pylint: disable=W0622
320 self.min = min
321 self.max = max
322
323 def __repr__(self):
324 return ("<%s min=%s max=%s>" %
325 (self.__class__.__name__, self.min, self.max))
326
327
328 class ArgSuggest(_Argument):
329 """Suggesting argument.
330
331 Value can be any of the ones passed to the constructor.
332
333 """
334 # pylint: disable=W0622
335 def __init__(self, min=0, max=None, choices=None):
336 _Argument.__init__(self, min=min, max=max)
337 self.choices = choices
338
339 def __repr__(self):
340 return ("<%s min=%s max=%s choices=%r>" %
341 (self.__class__.__name__, self.min, self.max, self.choices))
342
343
344 class ArgChoice(ArgSuggest):
345 """Choice argument.
346
347 Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
348 but value must be one of the choices.
349
350 """
351
352
353 class ArgUnknown(_Argument):
354 """Unknown argument to program (e.g. determined at runtime).
355
356 """
357
358
359 class ArgInstance(_Argument):
360 """Instances argument.
361
362 """
363
364
365 class ArgNode(_Argument):
366 """Node argument.
367
368 """
369
370
371 class ArgNetwork(_Argument):
372 """Network argument.
373
374 """
375
376 class ArgGroup(_Argument):
377 """Node group argument.
378
379 """
380
381
382 class ArgJobId(_Argument):
383 """Job ID argument.
384
385 """
386
387
388 class ArgFile(_Argument):
389 """File path argument.
390
391 """
392
393
394 class ArgCommand(_Argument):
395 """Command argument.
396
397 """
398
399
400 class ArgHost(_Argument):
401 """Host argument.
402
403 """
404
405
406 class ArgOs(_Argument):
407 """OS argument.
408
409 """
410
411
412 ARGS_NONE = []
413 ARGS_MANY_INSTANCES = [ArgInstance()]
414 ARGS_MANY_NETWORKS = [ArgNetwork()]
415 ARGS_MANY_NODES = [ArgNode()]
416 ARGS_MANY_GROUPS = [ArgGroup()]
417 ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
418 ARGS_ONE_NETWORK = [ArgNetwork(min=1, max=1)]
419 ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
420 # TODO
421 ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
422 ARGS_ONE_OS = [ArgOs(min=1, max=1)]
423
424
425 def _ExtractTagsObject(opts, args):
426 """Extract the tag type object.
427
428 Note that this function will modify its args parameter.
429
430 """
431 if not hasattr(opts, "tag_type"):
432 raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
433 kind = opts.tag_type
434 if kind == constants.TAG_CLUSTER:
435 retval = kind, None
436 elif kind in (constants.TAG_NODEGROUP,
437 constants.TAG_NODE,
438 constants.TAG_NETWORK,
439 constants.TAG_INSTANCE):
440 if not args:
441 raise errors.OpPrereqError("no arguments passed to the command",
442 errors.ECODE_INVAL)
443 name = args.pop(0)
444 retval = kind, name
445 else:
446 raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
447 return retval
448
449
450 def _ExtendTags(opts, args):
451 """Extend the args if a source file has been given.
452
453 This function will extend the tags with the contents of the file
454 passed in the 'tags_source' attribute of the opts parameter. A file
455 named '-' will be replaced by stdin.
456
457 """
458 fname = opts.tags_source
459 if fname is None:
460 return
461 if fname == "-":
462 new_fh = sys.stdin
463 else:
464 new_fh = open(fname, "r")
465 new_data = []
466 try:
467 # we don't use the nice 'new_data = [line.strip() for line in new_fh]'
468 # because of python bug 1633941
469 while True:
470 line = new_fh.readline()
471 if not line:
472 break
473 new_data.append(line.strip())
474 finally:
475 new_fh.close()
476 args.extend(new_data)
477
478
479 def ListTags(opts, args):
480 """List the tags on a given object.
481
482 This is a generic implementation that knows how to deal with all
483 tag object types (cluster, node group, node, network, instance). The opts
484 argument is expected to contain a tag_type field denoting what
485 object type we work on.
486
487 """
488 kind, name = _ExtractTagsObject(opts, args)
489 cl = GetClient(query=True)
490 result = cl.QueryTags(kind, name)
491 result = list(result)
492 result.sort()
493 for tag in result:
494 ToStdout(tag)
495
496
497 def AddTags(opts, args):
498 """Add tags on a given object.
499
500 This is a generic implementation that knows how to deal with all
501 tag object types (cluster, node group, node, network, instance). The opts
502 argument is expected to contain a tag_type field denoting what
503 object type we work on.
504
505 """
506 kind, name = _ExtractTagsObject(opts, args)
507 _ExtendTags(opts, args)
508 if not args:
509 raise errors.OpPrereqError("No tags to be added", errors.ECODE_INVAL)
510 op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
511 SubmitOrSend(op, opts)
512
513
514 def RemoveTags(opts, args):
515 """Remove tags from a given object.
516
517 This is a generic implementation that knows how to deal with all
518 tag object types (cluster, node group, node, network, instance). The opts
519 argument is expected to contain a tag_type field denoting what
520 object type we work on.
521
522 """
523 kind, name = _ExtractTagsObject(opts, args)
524 _ExtendTags(opts, args)
525 if not args:
526 raise errors.OpPrereqError("No tags to be removed", errors.ECODE_INVAL)
527 op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
528 SubmitOrSend(op, opts)
529
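# Example (illustrative, not part of the original module): the generic tag
# helpers above expect the calling command to provide a "tag_type" attribute
# (and optionally "tags_source") on the parsed options. A hypothetical call
# for network tags:
#
#   # opts.tag_type == constants.TAG_NETWORK, opts.tags_source is None
#   # args == ["net-example", "webfarm", "dmz"]
#   AddTags(opts, args)
#   # -> submits OpTagsSet(kind=constants.TAG_NETWORK, name="net-example",
#   #                      tags=["webfarm", "dmz"])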
530
531 def check_unit(option, opt, value): # pylint: disable=W0613
532 """OptParsers custom converter for units.
533
534 """
535 try:
536 return utils.ParseUnit(value)
537 except errors.UnitParseError, err:
538 raise OptionValueError("option %s: %s" % (opt, err))
539
540
541 def _SplitKeyVal(opt, data):
542 """Convert a KeyVal string into a dict.
543
544 This function will convert a key=val[,...] string into a dict. Empty
545 values are converted specially: keys which have the prefix 'no_' will get
546 value=False with the prefix stripped, keys with the prefix '-' will get
547 value=None with the prefix stripped, and all other keys get value=True.
548
549 @type opt: string
550 @param opt: a string holding the option name for which we process the
551 data, used in building error messages
552 @type data: string
553 @param data: a string of the format key=val,key=val,...
554 @rtype: dict
555 @return: a dictionary of the form {key: val, ...}
556 @raises errors.ParameterError: if there are duplicate keys
557
558 """
559 kv_dict = {}
560 if data:
561 for elem in utils.UnescapeAndSplit(data, sep=","):
562 if "=" in elem:
563 key, val = elem.split("=", 1)
564 else:
565 if elem.startswith(NO_PREFIX):
566 key, val = elem[len(NO_PREFIX):], False
567 elif elem.startswith(UN_PREFIX):
568 key, val = elem[len(UN_PREFIX):], None
569 else:
570 key, val = elem, True
571 if key in kv_dict:
572 raise errors.ParameterError("Duplicate key '%s' in option %s" %
573 (key, opt))
574 kv_dict[key] = val
575 return kv_dict
576
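# Example (illustrative, with made-up keys): _SplitKeyVal applies the
# NO_PREFIX and UN_PREFIX rules described above:
#
#   _SplitKeyVal("--net", "mode=bridged,link=xen-br0,no_check,-mac")
#   # -> {"mode": "bridged", "link": "xen-br0", "check": False, "mac": None}
#
# Passing the same key twice raises errors.ParameterError.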
577
578 def check_ident_key_val(option, opt, value): # pylint: disable=W0613
579 """Custom parser for ident:key=val,key=val options.
580
581 This will store the parsed values as a tuple (ident, {key: val}). As such,
582 multiple uses of this option via action=append is possible.
583
584 """
585 if ":" not in value:
586 ident, rest = value, ""
587 else:
588 ident, rest = value.split(":", 1)
589
590 if ident.startswith(NO_PREFIX):
591 if rest:
592 msg = "Cannot pass options when removing parameter groups: %s" % value
593 raise errors.ParameterError(msg)
594 retval = (ident[len(NO_PREFIX):], False)
595 elif (ident.startswith(UN_PREFIX) and
596 (len(ident) <= len(UN_PREFIX) or
597 not ident[len(UN_PREFIX)][0].isdigit())):
598 if rest:
599 msg = "Cannot pass options when removing parameter groups: %s" % value
600 raise errors.ParameterError(msg)
601 retval = (ident[len(UN_PREFIX):], None)
602 else:
603 kv_dict = _SplitKeyVal(opt, rest)
604 retval = (ident, kv_dict)
605 return retval
606
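# Example (illustrative values): with type="identkeyval" an option value is
# stored as an (ident, dict) tuple, and the prefixed forms mark the whole
# parameter group for removal/reset as described in the error messages above:
#
#   check_ident_key_val(None, "--disk-parameters",
#                       "drbd:metavg=xenvg,resync-rate=100")
#   # -> ("drbd", {"metavg": "xenvg", "resync-rate": "100"})
#   check_ident_key_val(None, "--disk-parameters", "no_drbd")
#   # -> ("drbd", False)
#   check_ident_key_val(None, "--disk-parameters", "-drbd")
#   # -> ("drbd", None)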
607
608 def check_key_val(option, opt, value): # pylint: disable=W0613
609 """Custom parser class for key=val,key=val options.
610
611 This will store the parsed values as a dict {key: val}.
612
613 """
614 return _SplitKeyVal(opt, value)
615
616
617 def check_bool(option, opt, value): # pylint: disable=W0613
618 """Custom parser for yes/no options.
619
620 This will store the parsed value as either True or False.
621
622 """
623 value = value.lower()
624 if value == constants.VALUE_FALSE or value == "no":
625 return False
626 elif value == constants.VALUE_TRUE or value == "yes":
627 return True
628 else:
629 raise errors.ParameterError("Invalid boolean value '%s'" % value)
630
631
632 def check_list(option, opt, value): # pylint: disable=W0613
633 """Custom parser for comma-separated lists.
634
635 """
636 # we have to make this explicit check since "".split(",") is [""],
637 # not an empty list :(
638 if not value:
639 return []
640 else:
641 return utils.UnescapeAndSplit(value)
642
643
644 def check_maybefloat(option, opt, value): # pylint: disable=W0613
645 """Custom parser for float numbers which might be also defaults.
646
647 """
648 value = value.lower()
649
650 if value == constants.VALUE_DEFAULT:
651 return value
652 else:
653 return float(value)
654
655
656 # completion_suggest is normally a list. For dynamic completion we use numeric
657 # values that do not evaluate to False.
658 (OPT_COMPL_MANY_NODES,
659 OPT_COMPL_ONE_NODE,
660 OPT_COMPL_ONE_INSTANCE,
661 OPT_COMPL_ONE_OS,
662 OPT_COMPL_ONE_IALLOCATOR,
663 OPT_COMPL_ONE_NETWORK,
664 OPT_COMPL_INST_ADD_NODES,
665 OPT_COMPL_ONE_NODEGROUP) = range(100, 108)
666
667 OPT_COMPL_ALL = frozenset([
668 OPT_COMPL_MANY_NODES,
669 OPT_COMPL_ONE_NODE,
670 OPT_COMPL_ONE_INSTANCE,
671 OPT_COMPL_ONE_OS,
672 OPT_COMPL_ONE_IALLOCATOR,
673 OPT_COMPL_ONE_NETWORK,
674 OPT_COMPL_INST_ADD_NODES,
675 OPT_COMPL_ONE_NODEGROUP,
676 ])
677
678
679 class CliOption(Option):
680 """Custom option class for optparse.
681
682 """
683 ATTRS = Option.ATTRS + [
684 "completion_suggest",
685 ]
686 TYPES = Option.TYPES + (
687 "identkeyval",
688 "keyval",
689 "unit",
690 "bool",
691 "list",
692 "maybefloat",
693 )
694 TYPE_CHECKER = Option.TYPE_CHECKER.copy()
695 TYPE_CHECKER["identkeyval"] = check_ident_key_val
696 TYPE_CHECKER["keyval"] = check_key_val
697 TYPE_CHECKER["unit"] = check_unit
698 TYPE_CHECKER["bool"] = check_bool
699 TYPE_CHECKER["list"] = check_list
700 TYPE_CHECKER["maybefloat"] = check_maybefloat
701
702
703 # optparse.py sets make_option, so we do it for our own option class, too
704 cli_option = CliOption
705
706
707 _YORNO = "yes|no"
708
709 DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
710 help="Increase debugging level")
711
712 NOHDR_OPT = cli_option("--no-headers", default=False,
713 action="store_true", dest="no_headers",
714 help="Don't display column headers")
715
716 SEP_OPT = cli_option("--separator", default=None,
717 action="store", dest="separator",
718 help=("Separator between output fields"
719 " (defaults to one space)"))
720
721 USEUNITS_OPT = cli_option("--units", default=None,
722 dest="units", choices=("h", "m", "g", "t"),
723 help="Specify units for output (one of h/m/g/t)")
724
725 FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
726 type="string", metavar="FIELDS",
727 help="Comma separated list of output fields")
728
729 FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
730 default=False, help="Force the operation")
731
732 CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
733 default=False, help="Do not require confirmation")
734
735 IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
736 action="store_true", default=False,
737 help=("Ignore offline nodes and do as much"
738 " as possible"))
739
740 TAG_ADD_OPT = cli_option("--tags", dest="tags",
741 default=None, help="Comma-separated list of instance"
742 " tags")
743
744 TAG_SRC_OPT = cli_option("--from", dest="tags_source",
745 default=None, help="File with tag names")
746
747 SUBMIT_OPT = cli_option("--submit", dest="submit_only",
748 default=False, action="store_true",
749 help=("Submit the job and return the job ID, but"
750 " don't wait for the job to finish"))
751
752 SYNC_OPT = cli_option("--sync", dest="do_locking",
753 default=False, action="store_true",
754 help=("Grab locks while doing the queries"
755 " in order to ensure more consistent results"))
756
757 DRY_RUN_OPT = cli_option("--dry-run", default=False,
758 action="store_true",
759 help=("Do not execute the operation, just run the"
760 " check steps and verify if it could be"
761 " executed"))
762
763 VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
764 action="store_true",
765 help="Increase the verbosity of the operation")
766
767 DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
768 action="store_true", dest="simulate_errors",
769 help="Debugging option that makes the operation"
770 " treat most runtime checks as failed")
771
772 NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
773 default=True, action="store_false",
774 help="Don't wait for sync (DANGEROUS!)")
775
776 WFSYNC_OPT = cli_option("--wait-for-sync", dest="wait_for_sync",
777 default=False, action="store_true",
778 help="Wait for disks to sync")
779
780 ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
781 action="store_true", default=False,
782 help="Enable offline instance")
783
784 OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
785 action="store_true", default=False,
786 help="Disable down instance")
787
788 DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
789 help=("Custom disk setup (%s)" %
790 utils.CommaJoin(constants.DISK_TEMPLATES)),
791 default=None, metavar="TEMPL",
792 choices=list(constants.DISK_TEMPLATES))
793
794 NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
795 help="Do not create any network cards for"
796 " the instance")
797
798 FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
799 help="Relative path under default cluster-wide"
800 " file storage dir to store file-based disks",
801 default=None, metavar="<DIR>")
802
803 FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
804 help="Driver to use for image files",
805 default="loop", metavar="<DRIVER>",
806 choices=list(constants.FILE_DRIVER))
807
808 IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
809 help="Select nodes for the instance automatically"
810 " using the <NAME> iallocator plugin",
811 default=None, type="string",
812 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
813
814 DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
815 metavar="<NAME>",
816 help="Set the default instance"
817 " allocator plugin",
818 default=None, type="string",
819 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
820
821 OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
822 metavar="<os>",
823 completion_suggest=OPT_COMPL_ONE_OS)
824
825 OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
826 type="keyval", default={},
827 help="OS parameters")
828
829 FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
830 action="store_true", default=False,
831 help="Force an unknown variant")
832
833 NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
834 action="store_true", default=False,
835 help="Do not install the OS (will"
836 " enable no-start)")
837
838 NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes",
839 dest="allow_runtime_chgs",
840 default=True, action="store_false",
841 help="Don't allow runtime changes")
842
843 BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
844 type="keyval", default={},
845 help="Backend parameters")
846
847 HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
848 default={}, dest="hvparams",
849 help="Hypervisor parameters")
850
851 DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
852 help="Disk template parameters, in the format"
853 " template:option=value,option=value,...",
854 type="identkeyval", action="append", default=[])
855
856 SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
857 type="keyval", default={},
858 help="Memory size specs: list of key=value,"
859 " where key is one of min, max, std"
860 " (in MB or using a unit)")
861
862 SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
863 type="keyval", default={},
864 help="CPU count specs: list of key=value,"
865 " where key is one of min, max, std")
866
867 SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
868 dest="ispecs_disk_count",
869 type="keyval", default={},
870 help="Disk count specs: list of key=value,"
871 " where key is one of min, max, std")
872
873 SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
874 type="keyval", default={},
875 help="Disk size specs: list of key=value,"
876 " where key is one of min, max, std"
877 " (in MB or using a unit)")
878
879 SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
880 type="keyval", default={},
881 help="NIC count specs: list of key=value,"
882 " where key is one of min, max, std")
883
884 IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
885 dest="ipolicy_disk_templates",
886 type="list", default=None,
887 help="Comma-separated list of"
888 " enabled disk templates")
889
890 IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
891 dest="ipolicy_vcpu_ratio",
892 type="maybefloat", default=None,
893 help="The maximum allowed vcpu-to-cpu ratio")
894
895 IPOLICY_SPINDLE_RATIO = cli_option("--ipolicy-spindle-ratio",
896 dest="ipolicy_spindle_ratio",
897 type="maybefloat", default=None,
898 help=("The maximum allowed instances to"
899 " spindle ratio"))
900
901 HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
902 help="Hypervisor and hypervisor options, in the"
903 " format hypervisor:option=value,option=value,...",
904 default=None, type="identkeyval")
905
906 HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
907 help="Hypervisor and hypervisor options, in the"
908 " format hypervisor:option=value,option=value,...",
909 default=[], action="append", type="identkeyval")
910
911 NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
912 action="store_false",
913 help="Don't check that the instance's IP"
914 " is alive")
915
916 NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
917 default=True, action="store_false",
918 help="Don't check that the instance's name"
919 " is resolvable")
920
921 NET_OPT = cli_option("--net",
922 help="NIC parameters", default=[],
923 dest="nics", action="append", type="identkeyval")
924
925 DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
926 dest="disks", action="append", type="identkeyval")
927
928 DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
929 help="Comma-separated list of disks"
930 " indices to act on (e.g. 0,2) (optional,"
931 " defaults to all disks)")
932
933 OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
934 help="Enforces a single-disk configuration using the"
935 " given disk size, in MiB unless a suffix is used",
936 default=None, type="unit", metavar="<size>")
937
938 IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
939 dest="ignore_consistency",
940 action="store_true", default=False,
941 help="Ignore the consistency of the disks on"
942 " the secondary")
943
944 ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
945 dest="allow_failover",
946 action="store_true", default=False,
947 help="If migration is not possible fallback to"
948 " failover")
949
950 NONLIVE_OPT = cli_option("--non-live", dest="live",
951 default=True, action="store_false",
952 help="Do a non-live migration (this usually means"
953 " freeze the instance, save the state, transfer and"
954 " only then resume running on the secondary node)")
955
956 MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
957 default=None,
958 choices=list(constants.HT_MIGRATION_MODES),
959 help="Override default migration mode (choose"
960 " either live or non-live")
961
962 NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
963 help="Target node and optional secondary node",
964 metavar="<pnode>[:<snode>]",
965 completion_suggest=OPT_COMPL_INST_ADD_NODES)
966
967 NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
968 action="append", metavar="<node>",
969 help="Use only this node (can be used multiple"
970 " times, if not given defaults to all nodes)",
971 completion_suggest=OPT_COMPL_ONE_NODE)
972
973 NODEGROUP_OPT_NAME = "--node-group"
974 NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
975 dest="nodegroup",
976 help="Node group (name or uuid)",
977 metavar="<nodegroup>",
978 default=None, type="string",
979 completion_suggest=OPT_COMPL_ONE_NODEGROUP)
980
981 SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
982 metavar="<node>",
983 completion_suggest=OPT_COMPL_ONE_NODE)
984
985 NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
986 action="store_false",
987 help="Don't start the instance after creation")
988
989 SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
990 action="store_true", default=False,
991 help="Show command instead of executing it")
992
993 CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
994 default=False, action="store_true",
995 help="Instead of performing the migration, try to"
996 " recover from a failed cleanup. This is safe"
997 " to run even if the instance is healthy, but it"
998 " will create extra replication traffic and "
999 " disrupt briefly the replication (like during the"
1000 " migration")
1001
1002 STATIC_OPT = cli_option("-s", "--static", dest="static",
1003 action="store_true", default=False,
1004 help="Only show configuration data, not runtime data")
1005
1006 ALL_OPT = cli_option("--all", dest="show_all",
1007 default=False, action="store_true",
1008 help="Show info on all instances on the cluster."
1009 " This can take a long time to run, use wisely")
1010
1011 SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
1012 action="store_true", default=False,
1013 help="Interactive OS reinstall, lists available"
1014 " OS templates for selection")
1015
1016 IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
1017 action="store_true", default=False,
1018 help="Remove the instance from the cluster"
1019 " configuration even if there are failures"
1020 " during the removal process")
1021
1022 IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
1023 dest="ignore_remove_failures",
1024 action="store_true", default=False,
1025 help="Remove the instance from the"
1026 " cluster configuration even if there"
1027 " are failures during the removal"
1028 " process")
1029
1030 REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
1031 action="store_true", default=False,
1032 help="Remove the instance from the cluster")
1033
1034 DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
1035 help="Specifies the new node for the instance",
1036 metavar="NODE", default=None,
1037 completion_suggest=OPT_COMPL_ONE_NODE)
1038
1039 NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
1040 help="Specifies the new secondary node",
1041 metavar="NODE", default=None,
1042 completion_suggest=OPT_COMPL_ONE_NODE)
1043
1044 ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
1045 default=False, action="store_true",
1046 help="Replace the disk(s) on the primary"
1047 " node (applies only to internally mirrored"
1048 " disk templates, e.g. %s)" %
1049 utils.CommaJoin(constants.DTS_INT_MIRROR))
1050
1051 ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
1052 default=False, action="store_true",
1053 help="Replace the disk(s) on the secondary"
1054 " node (applies only to internally mirrored"
1055 " disk templates, e.g. %s)" %
1056 utils.CommaJoin(constants.DTS_INT_MIRROR))
1057
1058 AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
1059 default=False, action="store_true",
1060 help="Lock all nodes and auto-promote as needed"
1061 " to MC status")
1062
1063 AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
1064 default=False, action="store_true",
1065 help="Automatically replace faulty disks"
1066 " (applies only to internally mirrored"
1067 " disk templates, e.g. %s)" %
1068 utils.CommaJoin(constants.DTS_INT_MIRROR))
1069
1070 IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
1071 default=False, action="store_true",
1072 help="Ignore current recorded size"
1073 " (useful for forcing activation when"
1074 " the recorded size is wrong)")
1075
1076 SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
1077 metavar="<node>",
1078 completion_suggest=OPT_COMPL_ONE_NODE)
1079
1080 SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
1081 metavar="<dir>")
1082
1083 SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
1084 help="Specify the secondary ip for the node",
1085 metavar="ADDRESS", default=None)
1086
1087 READD_OPT = cli_option("--readd", dest="readd",
1088 default=False, action="store_true",
1089 help="Readd old node after replacing it")
1090
1091 NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
1092 default=True, action="store_false",
1093 help="Disable SSH key fingerprint checking")
1094
1095 NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
1096 default=False, action="store_true",
1097 help="Force the joining of a node")
1098
1099 MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
1100 type="bool", default=None, metavar=_YORNO,
1101 help="Set the master_candidate flag on the node")
1102
1103 OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
1104 type="bool", default=None,
1105 help=("Set the offline flag on the node"
1106 " (cluster does not communicate with offline"
1107 " nodes)"))
1108
1109 DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
1110 type="bool", default=None,
1111 help=("Set the drained flag on the node"
1112 " (excluded from allocation operations)"))
1113
1114 CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
1115 type="bool", default=None, metavar=_YORNO,
1116 help="Set the master_capable flag on the node")
1117
1118 CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
1119 type="bool", default=None, metavar=_YORNO,
1120 help="Set the vm_capable flag on the node")
1121
1122 ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
1123 type="bool", default=None, metavar=_YORNO,
1124 help="Set the allocatable flag on a volume")
1125
1126 NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
1127 help="Disable support for lvm based instances"
1128 " (cluster-wide)",
1129 action="store_false", default=True)
1130
1131 ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
1132 dest="enabled_hypervisors",
1133 help="Comma-separated list of hypervisors",
1134 type="string", default=None)
1135
1136 NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
1137 type="keyval", default={},
1138 help="NIC parameters")
1139
1140 CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
1141 dest="candidate_pool_size", type="int",
1142 help="Set the candidate pool size")
1143
1144 VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
1145 help=("Enables LVM and specifies the volume group"
1146 " name (cluster-wide) for disk allocation"
1147 " [%s]" % constants.DEFAULT_VG),
1148 metavar="VG", default=None)
1149
1150 YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
1151 help="Destroy cluster", action="store_true")
1152
1153 NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
1154 help="Skip node agreement check (dangerous)",
1155 action="store_true", default=False)
1156
1157 MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
1158 help="Specify the mac prefix for the instance IP"
1159 " addresses, in the format XX:XX:XX",
1160 metavar="PREFIX",
1161 default=None)
1162
1163 MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
1164 help="Specify the node interface (cluster-wide)"
1165 " on which the master IP address will be added"
1166 " (cluster init default: %s)" %
1167 constants.DEFAULT_BRIDGE,
1168 metavar="NETDEV",
1169 default=None)
1170
1171 MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
1172 help="Specify the netmask of the master IP",
1173 metavar="NETMASK",
1174 default=None)
1175
1176 USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
1177 dest="use_external_mip_script",
1178 help="Specify whether to run a"
1179 " user-provided script for the master"
1180 " IP address turnup and"
1181 " turndown operations",
1182 type="bool", metavar=_YORNO, default=None)
1183
1184 GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
1185 help="Specify the default directory (cluster-"
1186 "wide) for storing the file-based disks [%s]" %
1187 pathutils.DEFAULT_FILE_STORAGE_DIR,
1188 metavar="DIR",
1189 default=pathutils.DEFAULT_FILE_STORAGE_DIR)
1190
1191 GLOBAL_SHARED_FILEDIR_OPT = cli_option(
1192 "--shared-file-storage-dir",
1193 dest="shared_file_storage_dir",
1194 help="Specify the default directory (cluster-wide) for storing the"
1195 " shared file-based disks [%s]" %
1196 pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR,
1197 metavar="SHAREDDIR", default=pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR)
1198
1199 NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
1200 help="Don't modify %s" % pathutils.ETC_HOSTS,
1201 action="store_false", default=True)
1202
1203 NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
1204 help="Don't initialize SSH keys",
1205 action="store_false", default=True)
1206
1207 ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
1208 help="Enable parseable error messages",
1209 action="store_true", default=False)
1210
1211 NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
1212 help="Skip N+1 memory redundancy tests",
1213 action="store_true", default=False)
1214
1215 REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
1216 help="Type of reboot: soft/hard/full",
1217 default=constants.INSTANCE_REBOOT_HARD,
1218 metavar="<REBOOT>",
1219 choices=list(constants.REBOOT_TYPES))
1220
1221 IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
1222 dest="ignore_secondaries",
1223 default=False, action="store_true",
1224 help="Ignore errors from secondaries")
1225
1226 NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
1227 action="store_false", default=True,
1228 help="Don't shutdown the instance (unsafe)")
1229
1230 TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
1231 default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1232 help="Maximum time to wait")
1233
1234 SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
1235 dest="shutdown_timeout", type="int",
1236 default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1237 help="Maximum time to wait for instance"
1238 " shutdown")
1239
1240 INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
1241 default=None,
1242 help=("Number of seconds between repetions of the"
1243 " command"))
1244
1245 EARLY_RELEASE_OPT = cli_option("--early-release",
1246 dest="early_release", default=False,
1247 action="store_true",
1248 help="Release the locks on the secondary"
1249 " node(s) early")
1250
1251 NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
1252 dest="new_cluster_cert",
1253 default=False, action="store_true",
1254 help="Generate a new cluster certificate")
1255
1256 RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
1257 default=None,
1258 help="File containing new RAPI certificate")
1259
1260 NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
1261 default=None, action="store_true",
1262 help=("Generate a new self-signed RAPI"
1263 " certificate"))
1264
1265 SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
1266 default=None,
1267 help="File containing new SPICE certificate")
1268
1269 SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
1270 default=None,
1271 help="File containing the certificate of the CA"
1272 " which signed the SPICE certificate")
1273
1274 NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
1275 dest="new_spice_cert", default=None,
1276 action="store_true",
1277 help=("Generate a new self-signed SPICE"
1278 " certificate"))
1279
1280 NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
1281 dest="new_confd_hmac_key",
1282 default=False, action="store_true",
1283 help=("Create a new HMAC key for %s" %
1284 constants.CONFD))
1285
1286 CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
1287 dest="cluster_domain_secret",
1288 default=None,
1289 help=("Load new new cluster domain"
1290 " secret from file"))
1291
1292 NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
1293 dest="new_cluster_domain_secret",
1294 default=False, action="store_true",
1295 help=("Create a new cluster domain"
1296 " secret"))
1297
1298 USE_REPL_NET_OPT = cli_option("--use-replication-network",
1299 dest="use_replication_network",
1300 help="Whether to use the replication network"
1301 " for talking to the nodes",
1302 action="store_true", default=False)
1303
1304 MAINTAIN_NODE_HEALTH_OPT = \
1305 cli_option("--maintain-node-health", dest="maintain_node_health",
1306 metavar=_YORNO, default=None, type="bool",
1307 help="Configure the cluster to automatically maintain node"
1308 " health, by shutting down unknown instances, shutting down"
1309 " unknown DRBD devices, etc.")
1310
1311 IDENTIFY_DEFAULTS_OPT = \
1312 cli_option("--identify-defaults", dest="identify_defaults",
1313 default=False, action="store_true",
1314 help="Identify which saved instance parameters are equal to"
1315 " the current cluster defaults and set them as such, instead"
1316 " of marking them as overridden")
1317
1318 UIDPOOL_OPT = cli_option("--uid-pool", default=None,
1319 action="store", dest="uid_pool",
1320 help=("A list of user-ids or user-id"
1321 " ranges separated by commas"))
1322
1323 ADD_UIDS_OPT = cli_option("--add-uids", default=None,
1324 action="store", dest="add_uids",
1325 help=("A list of user-ids or user-id"
1326 " ranges separated by commas, to be"
1327 " added to the user-id pool"))
1328
1329 REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
1330 action="store", dest="remove_uids",
1331 help=("A list of user-ids or user-id"
1332 " ranges separated by commas, to be"
1333 " removed from the user-id pool"))
1334
1335 RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
1336 action="store", dest="reserved_lvs",
1337 help=("A comma-separated list of reserved"
1338 " logical volumes names, that will be"
1339 " ignored by cluster verify"))
1340
1341 ROMAN_OPT = cli_option("--roman",
1342 dest="roman_integers", default=False,
1343 action="store_true",
1344 help="Use roman numbers for positive integers")
1345
1346 DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
1347 action="store", default=None,
1348 help="Specifies usermode helper for DRBD")
1349
1350 NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
1351 action="store_false", default=True,
1352 help="Disable support for DRBD")
1353
1354 PRIMARY_IP_VERSION_OPT = \
1355 cli_option("--primary-ip-version", default=constants.IP4_VERSION,
1356 action="store", dest="primary_ip_version",
1357 metavar="%d|%d" % (constants.IP4_VERSION,
1358 constants.IP6_VERSION),
1359 help="Cluster-wide IP version for primary IP")
1360
1361 SHOW_MACHINE_OPT = cli_option("-M", "--show-machine-names", default=False,
1362 action="store_true",
1363 help="Show machine name for every line in output")
1364
1365
1366 def _PriorityOptionCb(option, _, value, parser):
1367 """Callback for processing C{--priority} option.
1368
1369 """
1370 value = _PRIONAME_TO_VALUE[value]
1371
1372 setattr(parser.values, option.dest, value)
1373
1374
1375 PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
1376 metavar="|".join(name for name, _ in _PRIORITY_NAMES),
1377 choices=_PRIONAME_TO_VALUE.keys(),
1378 action="callback", type="choice",
1379 callback=_PriorityOptionCb,
1380 help="Priority for opcode processing")
1381
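# Example (illustrative): after parsing "--priority high", the callback above
# leaves the numeric opcode priority in the parsed options:
#
#   _PRIONAME_TO_VALUE["high"] == constants.OP_PRIO_HIGH
#   # i.e. options.priority == constants.OP_PRIO_HIGH
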
1382 HID_OS_OPT = cli_option("--hidden", dest="hidden",
1383 type="bool", default=None, metavar=_YORNO,
1384 help="Sets the hidden flag on the OS")
1385
1386 BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
1387 type="bool", default=None, metavar=_YORNO,
1388 help="Sets the blacklisted flag on the OS")
1389
1390 PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
1391 type="bool", metavar=_YORNO,
1392 dest="prealloc_wipe_disks",
1393 help=("Wipe disks prior to instance"
1394 " creation"))
1395
1396 NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
1397 type="keyval", default=None,
1398 help="Node parameters")
1399
1400 ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
1401 action="store", metavar="POLICY", default=None,
1402 help="Allocation policy for the node group")
1403
1404 NODE_POWERED_OPT = cli_option("--node-powered", default=None,
1405 type="bool", metavar=_YORNO,
1406 dest="node_powered",
1407 help="Specify if the SoR for node is powered")
1408
1409 OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
1410 default=constants.OOB_TIMEOUT,
1411 help="Maximum time to wait for out-of-band helper")
1412
1413 POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
1414 default=constants.OOB_POWER_DELAY,
1415 help="Time in seconds to wait between power-ons")
1416
1417 FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
1418 action="store_true", default=False,
1419 help=("Whether command argument should be treated"
1420 " as filter"))
1421
1422 NO_REMEMBER_OPT = cli_option("--no-remember",
1423 dest="no_remember",
1424 action="store_true", default=False,
1425 help="Perform but do not record the change"
1426 " in the configuration")
1427
1428 PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
1429 default=False, action="store_true",
1430 help="Evacuate primary instances only")
1431
1432 SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
1433 default=False, action="store_true",
1434 help="Evacuate secondary instances only"
1435 " (applies only to internally mirrored"
1436 " disk templates, e.g. %s)" %
1437 utils.CommaJoin(constants.DTS_INT_MIRROR))
1438
1439 STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
1440 action="store_true", default=False,
1441 help="Pause instance at startup")
1442
1443 TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
1444 help="Destination node group (name or uuid)",
1445 default=None, action="append",
1446 completion_suggest=OPT_COMPL_ONE_NODEGROUP)
1447
1448 IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
1449 action="append", dest="ignore_errors",
1450 choices=list(constants.CV_ALL_ECODES_STRINGS),
1451 help="Error code to be ignored")
1452
1453 DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
1454 action="append",
1455 help=("Specify disk state information in the"
1456 " format"
1457 " storage_type/identifier:option=value,...;"
1458 " note this is unused for now"),
1459 type="identkeyval")
1460
1461 HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
1462 action="append",
1463 help=("Specify hypervisor state information in the"
1464 " format hypervisor:option=value,...;"
1465 " note this is unused for now"),
1466 type="identkeyval")
1467
1468 IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
1469 action="store_true", default=False,
1470 help="Ignore instance policy violations")
1471
1472 RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
1473 help="Sets the instance's runtime memory,"
1474 " ballooning it up or down to the new value",
1475 default=None, type="unit", metavar="<size>")
1476
1477 ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
1478 action="store_true", default=False,
1479 help="Marks the grow as absolute instead of the"
1480 " (default) relative mode")
1481
1482 NETWORK_OPT = cli_option("--network",
1483 action="store", default=None, dest="network",
1484 help="IP network in CIDR notation")
1485
1486 GATEWAY_OPT = cli_option("--gateway",
1487 action="store", default=None, dest="gateway",
1488 help="IP address of the router (gateway)")
1489
1490 ADD_RESERVED_IPS_OPT = cli_option("--add-reserved-ips",
1491 action="store", default=None,
1492 dest="add_reserved_ips",
1493 help="Comma-separated list of"
1494 " reserved IPs to add")
1495
1496 REMOVE_RESERVED_IPS_OPT = cli_option("--remove-reserved-ips",
1497 action="store", default=None,
1498 dest="remove_reserved_ips",
1499 help="Comma-delimited list of"
1500 " reserved IPs to remove")
1501
1502 NETWORK_TYPE_OPT = cli_option("--network-type",
1503 action="store", default=None, dest="network_type",
1504 help="Network type: private, public, None")
1505
1506 NETWORK6_OPT = cli_option("--network6",
1507 action="store", default=None, dest="network6",
1508 help="IP network in CIDR notation")
1509
1510 GATEWAY6_OPT = cli_option("--gateway6",
1511 action="store", default=None, dest="gateway6",
1512 help="IP6 address of the router (gateway)")
1513
1514 NOCONFLICTSCHECK_OPT = cli_option("--no-conflicts-check",
1515 dest="conflicts_check",
1516 default=True,
1517 action="store_false",
1518 help="Don't check for conflicting IPs")
1519
1520 #: Options provided by all commands
1521 COMMON_OPTS = [DEBUG_OPT]
1522
1523 # common options for creating instances. The add and import commands then add
1524 # their own specific ones.
1525 COMMON_CREATE_OPTS = [
1526 BACKEND_OPT,
1527 DISK_OPT,
1528 DISK_TEMPLATE_OPT,
1529 FILESTORE_DIR_OPT,
1530 FILESTORE_DRIVER_OPT,
1531 HYPERVISOR_OPT,
1532 IALLOCATOR_OPT,
1533 NET_OPT,
1534 NODE_PLACEMENT_OPT,
1535 NOIPCHECK_OPT,
1536 NOCONFLICTSCHECK_OPT,
1537 NONAMECHECK_OPT,
1538 NONICS_OPT,
1539 NWSYNC_OPT,
1540 OSPARAMS_OPT,
1541 OS_SIZE_OPT,
1542 SUBMIT_OPT,
1543 TAG_ADD_OPT,
1544 DRY_RUN_OPT,
1545 PRIORITY_OPT,
1546 ]
1547
1548 # common instance policy options
1549 INSTANCE_POLICY_OPTS = [
1550 SPECS_CPU_COUNT_OPT,
1551 SPECS_DISK_COUNT_OPT,
1552 SPECS_DISK_SIZE_OPT,
1553 SPECS_MEM_SIZE_OPT,
1554 SPECS_NIC_COUNT_OPT,
1555 IPOLICY_DISK_TEMPLATES,
1556 IPOLICY_VCPU_RATIO,
1557 IPOLICY_SPINDLE_RATIO,
1558 ]
1559
1560
1561 class _ShowUsage(Exception):
1562 """Exception class for L{_ParseArgs}.
1563
1564 """
1565 def __init__(self, exit_error):
1566 """Initializes instances of this class.
1567
1568 @type exit_error: bool
1569 @param exit_error: Whether to report failure on exit
1570
1571 """
1572 Exception.__init__(self)
1573 self.exit_error = exit_error
1574
1575
1576 class _ShowVersion(Exception):
1577 """Exception class for L{_ParseArgs}.
1578
1579 """
1580
1581
1582 def _ParseArgs(binary, argv, commands, aliases, env_override):
1583 """Parser for the command line arguments.
1584
1585 This function parses the arguments and returns the function which
1586 must be executed together with its (modified) arguments.
1587
1588 @param binary: Script name
1589 @param argv: Command line arguments
1590 @param commands: Dictionary containing command definitions
1591 @param aliases: dictionary with command aliases {"alias": "target", ...}
1592 @param env_override: list of env variables allowed for default args
1593 @raise _ShowUsage: If usage description should be shown
1594 @raise _ShowVersion: If version should be shown
1595
1596 """
1597 assert not (env_override - set(commands))
1598 assert not (set(aliases.keys()) & set(commands.keys()))
1599
1600 if len(argv) > 1:
1601 cmd = argv[1]
1602 else:
1603 # No option or command given
1604 raise _ShowUsage(exit_error=True)
1605
1606 if cmd == "--version":
1607 raise _ShowVersion()
1608 elif cmd == "--help":
1609 raise _ShowUsage(exit_error=False)
1610 elif not (cmd in commands or cmd in aliases):
1611 raise _ShowUsage(exit_error=True)
1612
1613 # get command, unalias it, and look it up in commands
1614 if cmd in aliases:
1615 if aliases[cmd] not in commands:
1616 raise errors.ProgrammerError("Alias '%s' maps to non-existing"
1617 " command '%s'" % (cmd, aliases[cmd]))
1618
1619 cmd = aliases[cmd]
1620
1621 if cmd in env_override:
1622 args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
1623 env_args = os.environ.get(args_env_name)
1624 if env_args:
1625 argv = utils.InsertAtPos(argv, 2, shlex.split(env_args))
1626
1627 func, args_def, parser_opts, usage, description = commands[cmd]
1628 parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
1629 description=description,
1630 formatter=TitledHelpFormatter(),
1631 usage="%%prog %s %s" % (cmd, usage))
1632 parser.disable_interspersed_args()
1633 options, args = parser.parse_args(args=argv[2:])
1634
1635 if not _CheckArguments(cmd, args_def, args):
1636 return None, None, None
1637
1638 return func, options, args
1639
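# Example (hypothetical command table; "ExampleFunc" and "gnt-example" are
# made up): _ParseArgs unpacks each entry as
# (function, args_def, parser_options, usage, description):
#
#   commands = {
#     "show": (ExampleFunc, ARGS_ONE_INSTANCE, [VERBOSE_OPT],
#              "<instance>", "Show details about an instance"),
#   }
#   func, options, args = _ParseArgs("gnt-example", sys.argv, commands, {}, set())
#   # the caller then invokes func(options, args)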
1640
1641 def _FormatUsage(binary, commands):
1642 """Generates a nice description of all commands.
1643
1644 @param binary: Script name
1645 @param commands: Dictionary containing command definitions
1646
1647 """
1648 # compute the max line length for cmd + usage
1649 mlen = min(60, max(map(len, commands)))
1650
1651 yield "Usage: %s {command} [options...] [argument...]" % binary
1652 yield "%s <command> --help to see details, or man %s" % (binary, binary)
1653 yield ""
1654 yield "Commands:"
1655
1656 # and format a nice command list
1657 for (cmd, (_, _, _, _, help_text)) in sorted(commands.items()):
1658 help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
1659 yield " %-*s - %s" % (mlen, cmd, help_lines.pop(0))
1660 for line in help_lines:
1661 yield " %-*s %s" % (mlen, "", line)
1662
1663 yield ""
1664
1665
1666 def _CheckArguments(cmd, args_def, args):
1667 """Verifies the arguments using the argument definition.
1668
1669 Algorithm:
1670
1671 1. Abort with error if values specified by user but none expected.
1672
1673 1. For each argument in definition
1674
1675 1. Keep running count of minimum number of values (min_count)
1676 1. Keep running count of maximum number of values (max_count)
1677 1. If it has an unlimited number of values
1678
1679 1. Abort with error if it's not the last argument in the definition
1680
1681 1. If last argument has limited number of values
1682
1683 1. Abort with error if number of values doesn't match or is too large
1684
1685 1. Abort with error if user didn't pass enough values (min_count)
1686
1687 """
1688 if args and not args_def:
1689 ToStderr("Error: Command %s expects no arguments", cmd)
1690 return False
1691
1692 min_count = None
1693 max_count = None
1694 check_max = None
1695
1696 last_idx = len(args_def) - 1
1697
1698 for idx, arg in enumerate(args_def):
1699 if min_count is None:
1700 min_count = arg.min
1701 elif arg.min is not None:
1702 min_count += arg.min
1703
1704 if max_count is None:
1705 max_count = arg.max
1706 elif arg.max is not None:
1707 max_count += arg.max
1708
1709 if idx == last_idx:
1710 check_max = (arg.max is not None)
1711
1712 elif arg.max is None:
1713 raise errors.ProgrammerError("Only the last argument can have max=None")
1714
1715 if check_max:
1716 # Command with exact number of arguments
1717 if (min_count is not None and max_count is not None and
1718 min_count == max_count and len(args) != min_count):
1719 ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
1720 return False
1721
1722 # Command with limited number of arguments
1723 if max_count is not None and len(args) > max_count:
1724 ToStderr("Error: Command %s expects only %d argument(s)",
1725 cmd, max_count)
1726 return False
1727
1728 # Command with some required arguments
1729 if min_count is not None and len(args) < min_count:
1730 ToStderr("Error: Command %s expects at least %d argument(s)",
1731 cmd, min_count)
1732 return False
1733
1734 return True
1735
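# Example (illustrative): combined with the ARGS_* definitions above,
# _CheckArguments enforces the declared argument counts, e.g.:
#
#   _CheckArguments("info", ARGS_ONE_INSTANCE, ["instance1"])   # -> True
#   _CheckArguments("info", ARGS_ONE_INSTANCE, [])              # -> False
#   _CheckArguments("list", ARGS_MANY_INSTANCES, ["i1", "i2"])  # -> True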
1736
1737 def SplitNodeOption(value):
1738 """Splits the value of a --node option.
1739
1740 """
1741 if value and ":" in value:
1742 return value.split(":", 1)
1743 else:
1744 return (value, None)
1745
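# Example (illustrative):
#   SplitNodeOption("node1.example.com:node2.example.com")
#   # -> ["node1.example.com", "node2.example.com"]
#   SplitNodeOption("node1.example.com")
#   # -> ("node1.example.com", None)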
1746
1747 def CalculateOSNames(os_name, os_variants):
1748 """Calculates all the names an OS can be called, according to its variants.
1749
1750 @type os_name: string
1751 @param os_name: base name of the os
1752 @type os_variants: list or None
1753 @param os_variants: list of supported variants
1754 @rtype: list
1755 @return: list of valid names
1756
1757 """
1758 if os_variants:
1759 return ["%s+%s" % (os_name, v) for v in os_variants]
1760 else:
1761 return [os_name]
1762
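# Example (illustrative OS name and variants):
#   CalculateOSNames("debootstrap", ["default", "minimal"])
#   # -> ["debootstrap+default", "debootstrap+minimal"]
#   CalculateOSNames("debootstrap", None)
#   # -> ["debootstrap"]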
1763
1764 def ParseFields(selected, default):
1765 """Parses the values of "--field"-like options.
1766
1767 @type selected: string or None
1768 @param selected: User-selected options
1769 @type default: list
1770 @param default: Default fields
1771
1772 """
1773 if selected is None:
1774 return default
1775
1776 if selected.startswith("+"):
1777 return default + selected[1:].split(",")
1778
1779 return selected.split(",")
1780
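# Example: with default fields ["name", "status"] (the field names below are
# purely illustrative):
#
#   ParseFields(None, default)          # -> ["name", "status"]
#   ParseFields("+oper_ram", default)   # -> ["name", "status", "oper_ram"]
#   ParseFields("name,pnode", default)  # -> ["name", "pnode"]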
1781
1782 UsesRPC = rpc.RunWithRPC
1783
1784
1785 def AskUser(text, choices=None):
1786 """Ask the user a question.
1787
1788 @param text: the question to ask
1789
1790 @param choices: list with elements tuples (input_char, return_value,
1791 description); if not given, it will default to: [('y', True,
1792 'Perform the operation'), ('n', False, 'Do not perform the operation')];
1793 note that the '?' char is reserved for help
1794
1795 @return: one of the return values from the choices list; if input is
1796 not possible (i.e. not running with a tty), we return the last
1797 entry from the list
1798
1799 """
1800 if choices is None:
1801 choices = [("y", True, "Perform the operation"),
1802 ("n", False, "Do not perform the operation")]
1803 if not choices or not isinstance(choices, list):
1804 raise errors.ProgrammerError("Invalid choices argument to AskUser")
1805 for entry in choices:
1806 if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
1807 raise errors.ProgrammerError("Invalid choices element to AskUser")
1808
1809 answer = choices[-1][1]
1810 new_text = []
1811 for line in text.splitlines():
1812 new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
1813 text = "\n".join(new_text)
1814 try:
1815 f = file("/dev/tty", "a+")
1816 except IOError:
1817 return answer
1818 try:
1819 chars = [entry[0] for entry in choices]
1820 chars[-1] = "[%s]" % chars[-1]
1821 chars.append("?")
1822 maps = dict([(entry[0], entry[1]) for entry in choices])
1823 while True:
1824 f.write(text)
1825 f.write("\n")
1826 f.write("/".join(chars))
1827 f.write(": ")
1828 line = f.readline(2).strip().lower()
1829 if line in maps:
1830 answer = maps[line]
1831 break
1832 elif line == "?":
1833 for entry in choices:
1834 f.write(" %s - %s\n" % (entry[0], entry[2]))
1835 f.write("\n")
1836 continue
1837 finally:
1838 f.close()
1839 return answer
1840
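# Illustrative sketch (not executed at import time): asking a question with a
# custom choice list; the last choice is also the default returned when no
# tty is available.
#
#   choices = [("y", True, "Remove this instance"),
#              ("a", "all", "Remove all listed instances"),
#              ("n", False, "Do not remove")]
#   answer = AskUser("Remove the instance?", choices)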
1841
1842 class JobSubmittedException(Exception):
1843 """Job was submitted, client should exit.
1844
1845 This exception has one argument, the ID of the job that was
1846 submitted. The handler should print this ID.
1847
1848 This is not an error, just a structured way to exit from clients.
1849
1850 """
1851
1852
1853 def SendJob(ops, cl=None):
1854 """Function to submit an opcode without waiting for the results.
1855
1856 @type ops: list
1857 @param ops: list of opcodes
1858 @type cl: luxi.Client
1859 @param cl: the luxi client to use for communicating with the master;
1860 if None, a new client will be created
1861
1862 """
1863 if cl is None:
1864 cl = GetClient()
1865
1866 job_id = cl.SubmitJob(ops)
1867
1868 return job_id
1869
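# Illustrative sketch (not executed): submitting a job without waiting for
# its result; "op" stands for any opcode instance built by the caller.
#
#   cl = GetClient()
#   job_id = SendJob([op], cl=cl)
#   ToStdout("%s", job_id)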
1870
1871 def GenericPollJob(job_id, cbs, report_cbs):
1872 """Generic job-polling function.
1873
1874 @type job_id: number
1875 @param job_id: Job ID
1876 @type cbs: Instance of L{JobPollCbBase}
1877 @param cbs: Data callbacks
1878 @type report_cbs: Instance of L{JobPollReportCbBase}
1879 @param report_cbs: Reporting callbacks
1880
1881 """
1882 prev_job_info = None
1883 prev_logmsg_serial = None
1884
1885 status = None
1886
1887 while True:
1888 result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
1889 prev_logmsg_serial)
1890 if not result:
1891 # job not found, go away!
1892 raise errors.JobLost("Job with id %s lost" % job_id)
1893
1894 if result == constants.JOB_NOTCHANGED:
1895 report_cbs.ReportNotChanged(job_id, status)
1896
1897 # Wait again
1898 continue
1899
1900 # Split result, a tuple of (field values, log entries)
1901 (job_info, log_entries) = result
1902 (status, ) = job_info
1903
1904 if log_entries:
1905 for log_entry in log_entries:
1906 (serial, timestamp, log_type, message) = log_entry
1907 report_cbs.ReportLogMessage(job_id, serial, timestamp,
1908 log_type, message)
1909 prev_logmsg_serial = max(prev_logmsg_serial, serial)
1910
1911 # TODO: Handle canceled and archived jobs
1912 elif status in (constants.JOB_STATUS_SUCCESS,
1913 constants.JOB_STATUS_ERROR,
1914 constants.JOB_STATUS_CANCELING,
1915 constants.JOB_STATUS_CANCELED):
1916 break
1917
1918 prev_job_info = job_info
1919
1920 jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
1921 if not jobs:
1922 raise errors.JobLost("Job with id %s lost" % job_id)
1923
1924 status, opstatus, result = jobs[0]
1925
1926 if status == constants.JOB_STATUS_SUCCESS:
1927 return result
1928
1929 if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
1930 raise errors.OpExecError("Job was canceled")
1931
1932 has_ok = False
1933 for idx, (status, msg) in enumerate(zip(opstatus, result)):
1934 if status == constants.OP_STATUS_SUCCESS:
1935 has_ok = True
1936 elif status == constants.OP_STATUS_ERROR:
1937 errors.MaybeRaise(msg)
1938
1939 if has_ok:
1940 raise errors.OpExecError("partial failure (opcode %d): %s" %
1941 (idx, msg))
1942
1943 raise errors.OpExecError(str(msg))
1944
1945 # default failure mode
1946 raise errors.OpExecError(result)
1947
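# Illustrative sketch (not executed): GenericPollJob is normally driven via
# PollJob below, which wires it up with the luxi-based callback classes
# defined later in this module:
#
#   cl = GetClient()
#   result = GenericPollJob(job_id, _LuxiJobPollCb(cl), StdioJobPollReportCb())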
1948
1949 class JobPollCbBase:
1950 """Base class for L{GenericPollJob} callbacks.
1951
1952 """
1953 def __init__(self):
1954 """Initializes this class.
1955
1956 """
1957
1958 def WaitForJobChangeOnce(self, job_id, fields,
1959 prev_job_info, prev_log_serial):
1960 """Waits for changes on a job.
1961
1962 """
1963 raise NotImplementedError()
1964
1965 def QueryJobs(self, job_ids, fields):
1966 """Returns the selected fields for the selected job IDs.
1967
1968 @type job_ids: list of numbers
1969 @param job_ids: Job IDs
1970 @type fields: list of strings
1971 @param fields: Fields
1972
1973 """
1974 raise NotImplementedError()
1975
1976
1977 class JobPollReportCbBase:
1978 """Base class for L{GenericPollJob} reporting callbacks.
1979
1980 """
1981 def __init__(self):
1982 """Initializes this class.
1983
1984 """
1985
1986 def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1987 """Handles a log message.
1988
1989 """
1990 raise NotImplementedError()
1991
1992 def ReportNotChanged(self, job_id, status):
1993 """Called for if a job hasn't changed in a while.
1994
1995 @type job_id: number
1996 @param job_id: Job ID
1997 @type status: string or None
1998 @param status: Job status if available
1999
2000 """
2001 raise NotImplementedError()
2002
2003
2004 class _LuxiJobPollCb(JobPollCbBase):
2005 def __init__(self, cl):
2006 """Initializes this class.
2007
2008 """
2009 JobPollCbBase.__init__(self)
2010 self.cl = cl
2011
2012 def WaitForJobChangeOnce(self, job_id, fields,
2013 prev_job_info, prev_log_serial):
2014 """Waits for changes on a job.
2015
2016 """
2017 return self.cl.WaitForJobChangeOnce(job_id, fields,
2018 prev_job_info, prev_log_serial)
2019
2020 def QueryJobs(self, job_ids, fields):
2021 """Returns the selected fields for the selected job IDs.
2022
2023 """
2024 return self.cl.QueryJobs(job_ids, fields)
2025
2026
2027 class FeedbackFnJobPollReportCb(JobPollReportCbBase):
2028 def __init__(self, feedback_fn):
2029 """Initializes this class.
2030
2031 """
2032 JobPollReportCbBase.__init__(self)
2033
2034 self.feedback_fn = feedback_fn
2035
2036 assert callable(feedback_fn)
2037
2038 def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2039 """Handles a log message.
2040
2041 """
2042 self.feedback_fn((timestamp, log_type, log_msg))
2043
2044 def ReportNotChanged(self, job_id, status):
2045 """Called if a job hasn't changed in a while.
2046
2047 """
2048 # Ignore
2049
2050
2051 class StdioJobPollReportCb(JobPollReportCbBase):
2052 def __init__(self):
2053 """Initializes this class.
2054
2055 """
2056 JobPollReportCbBase.__init__(self)
2057
2058 self.notified_queued = False
2059 self.notified_waitlock = False
2060
2061 def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2062 """Handles a log message.
2063
2064 """
2065 ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
2066 FormatLogMessage(log_type, log_msg))
2067
2068 def ReportNotChanged(self, job_id, status):
2069 """Called if a job hasn't changed in a while.
2070
2071 """
2072 if status is None:
2073 return
2074
2075 if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
2076 ToStderr("Job %s is waiting in queue", job_id)
2077 self.notified_queued = True
2078
2079 elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
2080 ToStderr("Job %s is trying to acquire all necessary locks", job_id)
2081 self.notified_waitlock = True
2082
2083
2084 def FormatLogMessage(log_type, log_msg):
2085 """Formats a job message according to its type.
2086
2087 """
2088 if log_type != constants.ELOG_MESSAGE:
2089 log_msg = str(log_msg)
2090
2091 return utils.SafeEncode(log_msg)
2092
2093
2094 def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
2095 """Function to poll for the result of a job.
2096
2097 @type job_id: job identifier
2098 @param job_id: the job to poll for results
2099 @type cl: luxi.Client
2100 @param cl: the luxi client to use for communicating with the master;
2101 if None, a new client will be created
2102
2103 """
2104 if cl is None:
2105 cl = GetClient()
2106
2107 if reporter is None:
2108 if feedback_fn:
2109 reporter = FeedbackFnJobPollReportCb(feedback_fn)
2110 else:
2111 reporter = StdioJobPollReportCb()
2112 elif feedback_fn:
2113 raise errors.ProgrammerError("Can't specify reporter and feedback function")
2114
2115 return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
2116
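# Illustrative sketch (not executed): waiting for a previously submitted job
# and printing its log messages through the default stdio reporter.
#
#   cl = GetClient()
#   results = PollJob(job_id, cl=cl)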
2117
2118 def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
2119 """Legacy function to submit an opcode.
2120
2121 This is just a simple wrapper around SendJob and PollJob. It should be
2122 extended to better handle feedback and interaction functions.
2123 interaction functions.
2124
2125 """
2126 if cl is None:
2127 cl = GetClient()
2128
2129 SetGenericOpcodeOpts([op], opts)
2130
2131 job_id = SendJob([op], cl=cl)
2132
2133 op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
2134 reporter=reporter)
2135
2136 return op_results[0]
2137
2138
2139 def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
2140 """Wrapper around SubmitOpCode or SendJob.
2141
2142 This function will decide, based on the 'opts' parameter, whether to
2143 submit and wait for the result of the opcode (and return it), or
2144 whether to just send the job and print its identifier. It is used in
2145 order to simplify the implementation of the '--submit' option.
2146
2147 It will also process the opcodes if we're sending them via SendJob
2148 (otherwise SubmitOpCode does it).
2149
2150 """
2151 if opts and opts.submit_only:
2152 job = [op]
2153 SetGenericOpcodeOpts(job, opts)
2154 job_id = SendJob(job, cl=cl)
2155 raise JobSubmittedException(job_id)
2156 else:
2157 return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
2158
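# Illustrative sketch (not executed): a command implementation hands its
# opcode to SubmitOrSend so that "--submit" is honoured transparently; the
# opcode used here is just an example.
#
#   op = opcodes.OpInstanceStartup(instance_name=args[0])
#   SubmitOrSend(op, opts)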
2159
2160 def SetGenericOpcodeOpts(opcode_list, options):
2161 """Processor for generic options.
2162
2163 This function updates the given opcodes based on generic command
2164 line options (like debug, dry-run, etc.).
2165
2166 @param opcode_list: list of opcodes
2167 @param options: command line options or None
2168 @return: None (in-place modification)
2169
2170 """
2171 if not options:
2172 return
2173 for op in opcode_list:
2174 op.debug_level = options.debug
2175 if hasattr(options, "dry_run"):
2176 op.dry_run = options.dry_run
2177 if getattr(options, "priority", None) is not None:
2178 op.priority = options.priority
2179
2180
2181 def GetClient(query=False):
2182 """Connects to the a luxi socket and returns a client.
2183
2184 @type query: boolean
2185 @param query: this signifies that the client will only be
2186 used for queries; if the build-time parameter
2187 enable-split-queries is enabled, then the client will be
2188 connected to the query socket instead of the masterd socket
2189
2190 """
2191 if query and constants.ENABLE_SPLIT_QUERY:
2192 address = pathutils.QUERY_SOCKET
2193 else:
2194 address = None
2195 # TODO: Cache object?
2196 try:
2197 client = luxi.Client(address=address)
2198 except luxi.NoMasterError:
2199 ss = ssconf.SimpleStore()
2200
2201 # Try to read ssconf file
2202 try:
2203 ss.GetMasterNode()
2204 except errors.ConfigurationError:
2205 raise errors.OpPrereqError("Cluster not initialized or this machine is"
2206 " not part of a cluster",
2207 errors.ECODE_INVAL)
2208
2209 master, myself = ssconf.GetMasterAndMyself(ss=ss)
2210 if master != myself:
2211 raise errors.OpPrereqError("This is not the master node, please connect"
2212 " to node '%s' and rerun the command" %
2213 master, errors.ECODE_INVAL)
2214 raise
2215 return client
2216
2217
2218 def FormatError(err):
2219 """Return a formatted error message for a given error.
2220
2221 This function takes an exception instance and returns a tuple
2222 consisting of two values: first, the recommended exit code, and
2223 second, a string describing the error message (not
2224 newline-terminated).
2225
2226 """
2227 retcode = 1
2228 obuf = StringIO()
2229 msg = str(err)
2230 if isinstance(err, errors.ConfigurationError):
2231 txt = "Corrupt configuration file: %s" % msg
2232 logging.error(txt)
2233 obuf.write(txt + "\n")
2234 obuf.write("Aborting.")
2235 retcode = 2
2236 elif isinstance(err, errors.HooksAbort):
2237 obuf.write("Failure: hooks execution failed:\n")
2238 for node, script, out in err.args[0]:
2239 if out:
2240 obuf.write(" node: %s, script: %s, output: %s\n" %
2241 (node, script, out))
2242 else:
2243 obuf.write(" node: %s, script: %s (no output)\n" %
2244 (node, script))
2245 elif isinstance(err, errors.HooksFailure):
2246 obuf.write("Failure: hooks general failure: %s" % msg)
2247 elif isinstance(err, errors.ResolverError):
2248 this_host = netutils.Hostname.GetSysName()
2249 if err.args[0] == this_host:
2250 msg = "Failure: can't resolve my own hostname ('%s')"
2251 else:
2252 msg = "Failure: can't resolve hostname '%s'"
2253 obuf.write(msg % err.args[0])
2254 elif isinstance(err, errors.OpPrereqError):
2255 if len(err.args) == 2:
2256 obuf.write("Failure: prerequisites not met for this"
2257 " operation:\nerror type: %s, error details:\n%s" %
2258 (err.args[1], err.args[0]))
2259 else:
2260 obuf.write("Failure: prerequisites not met for this"
2261 " operation:\n%s" % msg)
2262 elif isinstance(err, errors.OpExecError):
2263 obuf.write("Failure: command execution error:\n%s" % msg)
2264 elif isinstance(err, errors.TagError):
2265 obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
2266 elif isinstance(err, errors.JobQueueDrainError):
2267 obuf.write("Failure: the job queue is marked for drain and doesn't"
2268 " accept new requests\n")
2269 elif isinstance(err, errors.JobQueueFull):
2270 obuf.write("Failure: the job queue is full and doesn't accept new"
2271 " job submissions until old jobs are archived\n")
2272 elif isinstance(err, errors.TypeEnforcementError):
2273 obuf.write("Parameter Error: %s" % msg)
2274 elif isinstance(err, errors.ParameterError):
2275 obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
2276 elif isinstance(err, luxi.NoMasterError):
2277 obuf.write("Cannot communicate with the master daemon.\nIs it running"
2278 " and listening for connections?")
2279 elif isinstance(err, luxi.TimeoutError):
2280 obuf.write("Timeout while talking to the master daemon. Jobs might have"
2281 " been submitted and will continue to run even if the call"
2282 " timed out. Useful commands in this situation are \"gnt-job"
2283 " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
2284 obuf.write(msg)
2285 elif isinstance(err, luxi.PermissionError):
2286 obuf.write("It seems you don't have permissions to connect to the"
2287 " master daemon.\nPlease retry as a different user.")
2288 elif isinstance(err, luxi.ProtocolError):
2289 obuf.write("Unhandled protocol error while talking to the master daemon:\n"
2290 "%s" % msg)
2291 elif isinstance(err, errors.JobLost):
2292 obuf.write("Error checking job status: %s" % msg)
2293 elif isinstance(err, errors.QueryFilterParseError):
2294 obuf.write("Error while parsing query filter: %s\n" % err.args[0])
2295 obuf.write("\n".join(err.GetDetails()))
2296 elif isinstance(err, errors.GenericError):
2297 obuf.write("Unhandled Ganeti error: %s" % msg)
2298 elif isinstance(err, JobSubmittedException):
2299 obuf.write("JobID: %s\n" % err.args[0])
2300 retcode = 0
2301 else:
2302 obuf.write("Unhandled exception: %s" % msg)
2303 return retcode, obuf.getvalue().rstrip("\n")
2304
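# Illustrative sketch (not executed): turning an exception into an exit code
# and a printable message.
#
#   try:
#     ...
#   except errors.GenericError, err:
#     retcode, message = FormatError(err)
#     ToStderr(message)
#     sys.exit(retcode)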
2305
2306 def GenericMain(commands, override=None, aliases=None,
2307 env_override=frozenset()):
2308 """Generic main function for all the gnt-* commands.
2309
2310 @param commands: a dictionary with a special structure, see the design doc
2311 for command line handling.
2312 @param override: if not None, we expect a dictionary with keys that will
2313 override command line options; this can be used to pass
2314 options from the scripts to generic functions
2315 @param aliases: dictionary with command aliases {'alias': 'target', ...}
2316 @param env_override: list of environment names which are allowed to submit
2317 default args for commands
2318
2319 """
2320 # save the program name and the entire command line for later logging
2321 if sys.argv:
2322 binary = os.path.basename(sys.argv[0])
2323 if not binary:
2324 binary = sys.argv[0]
2325
2326 if len(sys.argv) >= 2:
2327 logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
2328 else:
2329 logname = binary
2330
2331 cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
2332 else:
2333 binary = "<unknown program>"
2334 cmdline = "<unknown>"
2335
2336 if aliases is None:
2337 aliases = {}
2338
2339 try:
2340 (func, options, args) = _ParseArgs(binary, sys.argv, commands, aliases,
2341 env_override)
2342 except _ShowVersion:
2343 ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
2344 constants.RELEASE_VERSION)
2345 return constants.EXIT_SUCCESS
2346 except _ShowUsage, err:
2347 for line in _FormatUsage(binary, commands):
2348 ToStdout(line)
2349
2350 if err.exit_error:
2351 return constants.EXIT_FAILURE
2352 else:
2353 return constants.EXIT_SUCCESS
2354 except errors.ParameterError, err:
2355 result, err_msg = FormatError(err)
2356 ToStderr(err_msg)
2357 return 1
2358
2359 if func is None: # parse error
2360 return 1
2361
2362 if override is not None:
2363 for key, val in override.iteritems():
2364 setattr(options, key, val)
2365
2366 utils.SetupLogging(pathutils.LOG_COMMANDS, logname, debug=options.debug,
2367 stderr_logging=True)
2368
2369 logging.info("Command line: %s", cmdline)
2370
2371 try:
2372 result = func(options, args)
2373 except (errors.GenericError, luxi.ProtocolError,
2374 JobSubmittedException), err:
2375 result, err_msg = FormatError(err)
2376 logging.exception("Error during command processing")
2377 ToStderr(err_msg)
2378 except KeyboardInterrupt:
2379 result = constants.EXIT_FAILURE
2380 ToStderr("Aborted. Note that if the operation created any jobs, they"
2381 " might have been submitted and"
2382 " will continue to run in the background.")
2383 except IOError, err:
2384 if err.errno == errno.EPIPE:
2385 # our terminal went away, we'll exit
2386 sys.exit(constants.EXIT_FAILURE)
2387 else:
2388 raise
2389
2390 return result
2391
2392
2393 def ParseNicOption(optvalue):
2394 """Parses the value of the --net option(s).
2395
2396 """
2397 try:
2398 nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
2399 except (TypeError, ValueError), err:
2400 raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err),
2401 errors.ECODE_INVAL)
2402
2403 nics = [{}] * nic_max
2404 for nidx, ndict in optvalue:
2405 nidx = int(nidx)
2406
2407 if not isinstance(ndict, dict):
2408 raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
2409 " got %s" % (nidx, ndict), errors.ECODE_INVAL)
2410
2411 utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
2412
2413 nics[nidx] = ndict
2414
2415 return nics
2416
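# Example (illustrative): the --net option values arrive as (index, dict)
# pairs, so
#
#   ParseNicOption([("0", {"link": "br0"}), ("1", {"mode": "routed"})])
#
# returns [{"link": "br0"}, {"mode": "routed"}].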
2417
2418 def GenericInstanceCreate(mode, opts, args):
2419 """Add an instance to the cluster via either creation or import.
2420
2421 @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
2422 @param opts: the command line options selected by the user
2423 @type args: list
2424 @param args: should contain only one element, the new instance name
2425 @rtype: int
2426 @return: the desired exit code
2427
2428 """
2429 instance = args[0]
2430
2431 (pnode, snode) = SplitNodeOption(opts.node)
2432
2433 hypervisor = None
2434 hvparams = {}
2435 if opts.hypervisor:
2436 hypervisor, hvparams = opts.hypervisor
2437
2438 if opts.nics:
2439 nics = ParseNicOption(opts.nics)
2440 elif opts.no_nics:
2441 # no nics
2442 nics = []
2443 elif mode == constants.INSTANCE_CREATE:
2444 # default of one nic, all auto
2445 nics = [{}]
2446 else:
2447 # mode == import
2448 nics = []
2449
2450 if opts.disk_template == constants.DT_DISKLESS:
2451 if opts.disks or opts.sd_size is not None:
2452 raise errors.OpPrereqError("Diskless instance but disk"
2453 " information passed", errors.ECODE_INVAL)
2454 disks = []
2455 else:
2456 if (not opts.disks and not opts.sd_size
2457 and mode == constants.INSTANCE_CREATE):
2458 raise errors.OpPrereqError("No disk information specified",
2459 errors.ECODE_INVAL)
2460 if opts.disks and opts.sd_size is not None:
2461 raise errors.OpPrereqError("Please use either the '--disk' or"
2462 " '-s' option", errors.ECODE_INVAL)
2463 if opts.sd_size is not None:
2464 opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]
2465
2466 if opts.disks:
2467 try:
2468 disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
2469 except ValueError, err:
2470 raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err),
2471 errors.ECODE_INVAL)
2472 disks = [{}] * disk_max
2473 else:
2474 disks = []
2475 for didx, ddict in opts.disks:
2476 didx = int(didx)
2477 if not isinstance(ddict, dict):
2478 msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
2479 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
2480 elif constants.IDISK_SIZE in ddict:
2481 if constants.IDISK_ADOPT in ddict:
2482 raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
2483 " (disk %d)" % didx, errors.ECODE_INVAL)
2484 try:
2485 ddict[constants.IDISK_SIZE] = \
2486 utils.ParseUnit(ddict[constants.IDISK_SIZE])
2487 except ValueError, err:
2488 raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
2489 (didx, err), errors.ECODE_INVAL)
2490 elif constants.IDISK_ADOPT in ddict:
2491 if mode == constants.INSTANCE_IMPORT:
2492 raise errors.OpPrereqError("Disk adoption not allowed for instance"
2493 " import", errors.ECODE_INVAL)
2494 ddict[constants.IDISK_SIZE] = 0
2495 else:
2496 raise errors.OpPrereqError("Missing size or adoption source for"
2497 " disk %d" % didx, errors.ECODE_INVAL)
2498 disks[didx] = ddict
2499
2500 if opts.tags is not None:
2501 tags = opts.tags.split(",")
2502 else:
2503 tags = []
2504
2505 utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
2506 utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
2507
2508 if mode == constants.INSTANCE_CREATE:
2509 start = opts.start
2510 os_type = opts.os
2511 force_variant = opts.force_variant
2512 src_node = None
2513 src_path = None
2514 no_install = opts.no_install
2515 identify_defaults = False
2516 elif mode == constants.INSTANCE_IMPORT:
2517 start = False
2518 os_type = None
2519 force_variant = False
2520 src_node = opts.src_node
2521 src_path = opts.src_dir
2522 no_install = None
2523 identify_defaults = opts.identify_defaults
2524 else:
2525 raise errors.ProgrammerError("Invalid creation mode %s" % mode)
2526
2527 op = opcodes.OpInstanceCreate(instance_name=instance,
2528 disks=disks,
2529 disk_template=opts.disk_template,
2530 nics=nics,
2531 conflicts_check=opts.conflicts_check,
2532 pnode=pnode, snode=snode,
2533 ip_check=opts.ip_check,
2534 name_check=opts.name_check,
2535 wait_for_sync=opts.wait_for_sync,
2536 file_storage_dir=opts.file_storage_dir,
2537 file_driver=opts.file_driver,
2538 iallocator=opts.iallocator,
2539 hypervisor=hypervisor,
2540 hvparams=hvparams,
2541 beparams=opts.beparams,
2542 osparams=opts.osparams,
2543 mode=mode,
2544 start=start,
2545 os_type=os_type,
2546 force_variant=force_variant,
2547 src_node=src_node,
2548 src_path=src_path,
2549 tags=tags,
2550 no_install=no_install,
2551 identify_defaults=identify_defaults,
2552 ignore_ipolicy=opts.ignore_ipolicy)
2553
2554 SubmitOrSend(op, opts)
2555 return 0
2556
2557
2558 class _RunWhileClusterStoppedHelper:
2559 """Helper class for L{RunWhileClusterStopped} to simplify state management
2560
2561 """
2562 def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
2563 """Initializes this class.
2564
2565 @type feedback_fn: callable
2566 @param feedback_fn: Feedback function
2567 @type cluster_name: string
2568 @param cluster_name: Cluster name
2569 @type master_node: string
2570 @param master_node: Master node name
2571 @type online_nodes: list
2572 @param online_nodes: List of names of online nodes
2573
2574 """
2575 self.feedback_fn = feedback_fn
2576 self.cluster_name = cluster_name
2577 self.master_node = master_node
2578 self.online_nodes = online_nodes
2579
2580 self.ssh = ssh.SshRunner(self.cluster_name)
2581
2582 self.nonmaster_nodes = [name for name in online_nodes
2583 if name != master_node]
2584
2585 assert self.master_node not in self.nonmaster_nodes
2586
2587 def _RunCmd(self, node_name, cmd):
2588 """Runs a command on the local or a remote machine.
2589
2590 @type node_name: string
2591 @param node_name: Machine name
2592 @type cmd: list
2593 @param cmd: Command
2594
2595 """
2596 if node_name is None or node_name == self.master_node:
2597 # No need to use SSH
2598 result = utils.RunCmd(cmd)
2599 else:
2600 result = self.ssh.Run(node_name, constants.SSH_LOGIN_USER,
2601 utils.ShellQuoteArgs(cmd))
2602
2603 if result.failed:
2604 errmsg = ["Failed to run command %s" % result.cmd]
2605 if node_name:
2606 errmsg.append("on node %s" % node_name)
2607 errmsg.append(": exitcode %s and error %s" %
2608 (result.exit_code, result.output))
2609 raise errors.OpExecError(" ".join(errmsg))
2610
2611 def Call(self, fn, *args):
2612 """Call function while all daemons are stopped.
2613
2614 @type fn: callable
2615 @param fn: Function to be called
2616
2617 """
2618 # Pause watcher by acquiring an exclusive lock on watcher state file
2619 self.feedback_fn("Blocking watcher")
2620 watcher_block = utils.FileLock.Open(pathutils.WATCHER_LOCK_FILE)
2621 try:
2622 # TODO: Currently, this just blocks. There's no timeout.
2623 # TODO: Should it be a shared lock?
2624 watcher_block.Exclusive(blocking=True)
2625
2626 # Stop master daemons, so that no new jobs can come in and all running
2627 # ones are finished
2628 self.feedback_fn("Stopping master daemons")
2629 self._RunCmd(None, [pathutils.DAEMON_UTIL, "stop-master"])
2630 try:
2631 # Stop daemons on all nodes
2632 for node_name in self.online_nodes:
2633 self.feedback_fn("Stopping daemons on %s" % node_name)
2634 self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop-all"])
2635
2636 # All daemons are shut down now
2637 try:
2638 return fn(self, *args)
2639 except Exception, err:
2640 _, errmsg = FormatError(err)
2641 logging.exception("Caught exception")
2642 self.feedback_fn(errmsg)
2643 raise
2644 finally:
2645 # Start cluster again, master node last
2646 for node_name in self.nonmaster_nodes + [self.master_node]:
2647 self.feedback_fn("Starting daemons on %s" % node_name)
2648 self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start-all"])
2649 finally:
2650 # Resume watcher
2651 watcher_block.Close()
2652
2653
2654 def RunWhileClusterStopped(feedback_fn, fn, *args):
2655 """Calls a function while all cluster daemons are stopped.
2656
2657 @type feedback_fn: callable
2658 @param feedback_fn: Feedback function
2659 @type fn: callable
2660 @param fn: Function to be called when daemons are stopped
2661
2662 """
2663 feedback_fn("Gathering cluster information")
2664
2665 # This ensures we're running on the master daemon
2666 cl = GetClient()
2667
2668 (cluster_name, master_node) = \
2669 cl.QueryConfigValues(["cluster_name", "master_node"])
2670
2671 online_nodes = GetOnlineNodes([], cl=cl)
2672
2673 # Don't keep a reference to the client. The master daemon will go away.
2674 del cl
2675
2676 assert master_node in online_nodes
2677
2678 return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
2679 online_nodes).Call(fn, *args)
2680
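# Illustrative sketch (not executed): the callback receives the helper
# instance as its first argument while all daemons are down.
#
#   def _WhileStopped(helper):
#     helper.feedback_fn("all daemons are stopped")
#
#   RunWhileClusterStopped(ToStdout, _WhileStopped)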
2681
2682 def GenerateTable(headers, fields, separator, data,
2683 numfields=None, unitfields=None,
2684 units=None):
2685 """Prints a table with headers and different fields.
2686
2687 @type headers: dict
2688 @param headers: dictionary mapping field names to headers for
2689 the table
2690 @type fields: list
2691 @param fields: the field names corresponding to each row in
2692 the data field
2693 @param separator: the separator to be used; if this is None,
2694 the default 'smart' algorithm is used which computes optimal
2695 field width, otherwise just the separator is used between
2696 each field
2697 @type data: list
2698 @param data: a list of lists, each sublist being one row to be output
2699 @type numfields: list
2700 @param numfields: a list with the fields that hold numeric
2701 values and thus should be right-aligned
2702 @type unitfields: list
2703 @param unitfields: a list with the fields that hold numeric
2704 values that should be formatted with the units field
2705 @type units: string or None
2706 @param units: the units we should use for formatting, or None for
2707 automatic choice (human-readable for non-separator usage, otherwise
2708 megabytes); this is a one-letter string
2709
2710 """
2711 if units is None:
2712 if separator:
2713 units = "m"
2714 else:
2715 units = "h"
2716
2717 if numfields is None:
2718 numfields = []
2719 if unitfields is None:
2720 unitfields = []
2721
2722 numfields = utils.FieldSet(*numfields) # pylint: disable=W0142
2723 unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142
2724
2725 format_fields = []
2726 for field in fields:
2727 if headers and field not in headers:
2728 # TODO: handle unknown fields better (either revert to old
2729 # style of raising exception, or deal more intelligently with
2730 # variable fields)
2731 headers[field] = field
2732 if separator is not None:
2733 format_fields.append("%s")
2734 elif numfields.Matches(field):
2735 format_fields.append("%*s")
2736 else:
2737 format_fields.append("%-*s")
2738
2739 if separator is None:
2740 mlens = [0 for name in fields]
2741 format_str = " ".join(format_fields)
2742 else:
2743 format_str = separator.replace("%", "%%").join(format_fields)
2744
2745 for row in data:
2746 if row is None:
2747 continue
2748 for idx, val in enumerate(row):
2749 if unitfields.Matches(fields[idx]):
2750 try:
2751 val = int(val)
2752 except (TypeError, ValueError):
2753 pass
2754 else:
2755 val = row[idx] = utils.FormatUnit(val, units)
2756 val = row[idx] = str(val)
2757 if separator is None:
2758 mlens[idx] = max(mlens[idx], len(val))
2759
2760 result = []
2761 if headers:
2762 args = []
2763 for idx, name in enumerate(fields):
2764 hdr = headers[name]
2765 if separator is None:
2766 mlens[idx] = max(mlens[idx], len(hdr))
2767 args.append(mlens[idx])
2768 args.append(hdr)
2769 result.append(format_str % tuple(args))
2770
2771 if separator is None:
2772 assert len(mlens) == len(fields)
2773
2774 if fields and not numfields.Matches(fields[-1]):
2775 mlens[-1] = 0
2776
2777 for line in data:
2778 args = []
2779 if line is None:
2780 line = ["-" for _ in fields]
2781 for idx in range(len(fields)):
2782 if separator is None:
2783 args.append(mlens[idx])
2784 args.append(line[idx])
2785 result.append(format_str % tuple(args))
2786
2787 return result
2788
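# Illustrative sketch (not executed): building a small table with a numeric,
# unit-formatted column; the field names are only an assumed example.
#
#   headers = {"name": "Node", "dfree": "DFree"}
#   fields = ["name", "dfree"]
#   data = [["node1", 102400], ["node2", 51200]]
#   for line in GenerateTable(headers, fields, None, data,
#                             numfields=["dfree"], unitfields=["dfree"]):
#     ToStdout(line)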
2789
2790 def _FormatBool(value):
2791 """Formats a boolean value as a string.
2792
2793 """
2794 if value:
2795 return "Y"
2796 return "N"
2797
2798
2799 #: Default formatting for query results; (callback, align right)
2800 _DEFAULT_FORMAT_QUERY = {
2801 constants.QFT_TEXT: (str, False),
2802 constants.QFT_BOOL: (_FormatBool, False),
2803 constants.QFT_NUMBER: (str, True),
2804 constants.QFT_TIMESTAMP: (utils.FormatTime, False),
2805 constants.QFT_OTHER: (str, False),
2806 constants.QFT_UNKNOWN: (str, False),
2807 }
2808
2809
2810 def _GetColumnFormatter(fdef, override, unit):
2811 """Returns formatting function for a field.
2812
2813 @type fdef: L{objects.QueryFieldDefinition}
2814 @type override: dict
2815 @param override: Dictionary for overriding field formatting functions,
2816 indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2817 @type unit: string
2818 @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
2819 @rtype: tuple; (callable, bool)
2820 @return: Returns the function to format a value (takes one parameter) and a
2821 boolean for aligning the value on the right-hand side
2822
2823 """
2824 fmt = override.get(fdef.name, None)
2825 if fmt is not None:
2826 return fmt
2827
2828 assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY
2829
2830 if fdef.kind == constants.QFT_UNIT:
2831 # Can't keep this information in the static dictionary
2832 return (lambda value: utils.FormatUnit(value, unit), True)
2833
2834 fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
2835 if fmt is not None:
2836 return fmt
2837
2838 raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
2839
2840
2841 class _QueryColumnFormatter:
2842 """Callable class for formatting fields of a query.
2843
2844 """
2845 def __init__(self, fn, status_fn, verbose):
2846 """Initializes this class.
2847
2848 @type fn: callable
2849 @param fn: Formatting function
2850 @type status_fn: callable
2851 @param status_fn: Function to report fields' status
2852 @type verbose: boolean
2853 @param verbose: whether to use verbose field descriptions or not
2854
2855 """
2856 self._fn = fn
2857 self._status_fn = status_fn
2858 self._verbose = verbose
2859
2860 def __call__(self, data):
2861 """Returns a field's string representation.
2862
2863 """
2864 (status, value) = data
2865
2866 # Report status
2867 self._status_fn(status)
2868
2869 if status == constants.RS_NORMAL:
2870 return self._fn(value)
2871
2872 assert value is None, \
2873 "Found value %r for abnormal status %s" % (value, status)
2874
2875 return FormatResultError(status, self._verbose)
2876
2877
2878 def FormatResultError(status, verbose):
2879 """Formats result status other than L{constants.RS_NORMAL}.
2880
2881 @param status: The result status
2882 @type verbose: boolean
2883 @param verbose: Whether to return the verbose text
2884 @return: Text of result status
2885
2886 """
2887 assert status != constants.RS_NORMAL, \
2888 "FormatResultError called with status equal to constants.RS_NORMAL"
2889 try:
2890 (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
2891 except KeyError:
2892 raise NotImplementedError("Unknown status %s" % status)
2893 else:
2894 if verbose:
2895 return verbose_text
2896 return normal_text
2897
2898
2899 def FormatQueryResult(result, unit=None, format_override=None, separator=None,
2900 header=False, verbose=False):
2901 """Formats data in L{objects.QueryResponse}.
2902
2903 @type result: L{objects.QueryResponse}
2904 @param result: result of query operation
2905 @type unit: string
2906 @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
2907 see L{utils.text.FormatUnit}
2908 @type format_override: dict
2909 @param format_override: Dictionary for overriding field formatting functions,
2910 indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2911 @type separator: string or None
2912 @param separator: String used to separate fields
2913 @type header: bool
2914 @param header: Whether to output header row
2915 @type verbose: boolean
2916 @param verbose: whether to use verbose field descriptions or not
2917
2918 """
2919 if unit is None:
2920 if separator:
2921 unit = "m"
2922 else:
2923 unit = "h"
2924
2925 if format_override is None:
2926 format_override = {}
2927
2928 stats = dict.fromkeys(constants.RS_ALL, 0)
2929
2930 def _RecordStatus(status):
2931 if status in stats:
2932 stats[status] += 1
2933
2934 columns = []
2935 for fdef in result.fields:
2936 assert fdef.title and fdef.name
2937 (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
2938 columns.append(TableColumn(fdef.title,
2939 _QueryColumnFormatter(fn, _RecordStatus,
2940 verbose),
2941 align_right))
2942
2943 table = FormatTable(result.data, columns, header, separator)
2944
2945 # Collect statistics
2946 assert len(stats) == len(constants.RS_ALL)
2947 assert compat.all(count >= 0 for count in stats.values())
2948
2949 # Determine overall status. If there was no data, unknown fields must be
2950 # detected via the field definitions.
2951 if (stats[constants.RS_UNKNOWN] or
2952 (not result.data and _GetUnknownFields(result.fields))):
2953 status = QR_UNKNOWN
2954 elif compat.any(count > 0 for key, count in stats.items()
2955 if key != constants.RS_NORMAL):
2956 status = QR_INCOMPLETE
2957 else:
2958 status = QR_NORMAL
2959
2960 return (status, table)
2961
2962
2963 def _GetUnknownFields(fdefs):
2964 """Returns list of unknown fields included in C{fdefs}.
2965
2966 @type fdefs: list of L{objects.QueryFieldDefinition}
2967
2968 """
2969 return [fdef for fdef in fdefs
2970 if fdef.kind == constants.QFT_UNKNOWN]
2971
2972
2973 def _WarnUnknownFields(fdefs):
2974 """Prints a warning to stderr if a query included unknown fields.
2975
2976 @type fdefs: list of L{objects.QueryFieldDefinition}
2977
2978 """
2979 unknown = _GetUnknownFields(fdefs)
2980 if unknown:
2981 ToStderr("Warning: Queried for unknown fields %s",
2982 utils.CommaJoin(fdef.name for fdef in unknown))
2983 return True
2984
2985 return False
2986
2987
2988 def GenericList(resource, fields, names, unit, separator, header, cl=None,
2989 format_override=None, verbose=False, force_filter=False,
2990 namefield=None, qfilter=None, isnumeric=False):
2991 """Generic implementation for listing all items of a resource.
2992
2993 @param resource: One of L{constants.QR_VIA_LUXI}
2994 @type fields: list of strings
2995 @param fields: List of fields to query for
2996 @type names: list of strings
2997 @param names: Names of items to query for
2998 @type unit: string or None
2999 @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
3000 None for automatic choice (human-readable for non-separator usage,
3001 otherwise megabytes); this is a one-letter string
3002 @type separator: string or None
3003 @param separator: String used to separate fields
3004 @type header: bool
3005 @param header: Whether to show header row
3006 @type force_filter: bool
3007 @param force_filter: Whether to always treat names as filter
3008 @type format_override: dict
3009 @param format_override: Dictionary for overriding field formatting functions,
3010 indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
3011 @type verbose: boolean
3012 @param verbose: whether to use verbose field descriptions or not
3013 @type namefield: string
3014 @param namefield: Name of field to use for simple filters (see
3015 L{qlang.MakeFilter} for details)
3016 @type qfilter: list or None
3017 @param qfilter: Query filter (in addition to names)
3018 @type isnumeric: bool
3019 @param isnumeric: Whether the namefield's type is numeric, and therefore
3020 any simple filters built by namefield should use integer values to
3021 reflect that
3022
3023 """
3024 if not names:
3025 names = None
3026
3027 namefilter = qlang.MakeFilter(names, force_filter, namefield=namefield,
3028 isnumeric=isnumeric)
3029
3030 if qfilter is None:
3031 qfilter = namefilter
3032 elif namefilter is not None:
3033 qfilter = [qlang.OP_AND, namefilter, qfilter]
3034
3035 if cl is None:
3036 cl = GetClient()
3037
3038 response = cl.Query(resource, fields, qfilter)
3039
3040 found_unknown = _WarnUnknownFields(response.fields)
3041
3042 (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
3043 header=header,
3044 format_override=format_override,
3045 verbose=verbose)
3046
3047 for line in data:
3048 ToStdout(line)
3049
3050 assert ((found_unknown and status == QR_UNKNOWN) or
3051 (not found_unknown and status != QR_UNKNOWN))
3052
3053 if status == QR_UNKNOWN:
3054 return constants.EXIT_UNKNOWN_FIELD
3055
3056 # TODO: Should the list command fail if not all data could be collected?
3057 return constants.EXIT_SUCCESS
3058
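# Illustrative sketch (not executed): listing a resource the way the various
# "gnt-* list" commands do; the option attribute names are assumed to match
# the list-related options defined earlier in this module.
#
#   return GenericList(constants.QR_NODE, ["name", "dtotal", "dfree"], args,
#                      opts.units, opts.separator, not opts.no_headers,
#                      verbose=opts.verbose)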
3059
3060 def GenericListFields(resource, fields, separator, header, cl=None):
3061 """Generic implementation for listing fields for a resource.
3062
3063 @param resource: One of L{constants.QR_VIA_LUXI}
3064 @type fields: list of strings
3065 @param fields: List of fields to query for
3066 @type separator: string or None
3067 @param separator: String used to separate fields
3068 @type header: bool
3069 @param header: Whether to show header row
3070
3071 """
3072 if cl is None:
3073 cl = GetClient()
3074
3075 if not fields:
3076 fields = None
3077
3078 response = cl.QueryFields(resource, fields)
3079
3080 found_unknown = _WarnUnknownFields(response.fields)
3081
3082 columns = [
3083 TableColumn("Name", str, False),
3084 TableColumn("Title", str, False),
3085 TableColumn("Description", str, False),
3086 ]
3087
3088 rows = [[fdef.name, fdef.title, fdef.doc] for fdef in response.fields]
3089
3090 for line in FormatTable(rows, columns, header, separator):
3091 ToStdout(line)
3092
3093 if found_unknown:
3094 return constants.EXIT_UNKNOWN_FIELD
3095
3096 return constants.EXIT_SUCCESS
3097
3098
3099 class TableColumn:
3100 """Describes a column for L{FormatTable}.
3101
3102 """
3103 def __init__(self, title, fn, align_right):
3104 """Initializes this class.
3105
3106 @type title: string
3107 @param title: Column title
3108 @type fn: callable
3109 @param fn: Formatting function
3110 @type align_right: bool
3111 @param align_right: Whether to align values on the right-hand side
3112
3113 """
3114 self.title = title
3115 self.format = fn
3116 self.align_right = align_right
3117
3118
3119 def _GetColFormatString(width, align_right):
3120 """Returns the format string for a field.
3121
3122 """
3123 if align_right:
3124 sign = ""
3125 else:
3126 sign = "-"
3127
3128 return "%%%s%ss" % (sign, width)
3129
3130
3131 def FormatTable(rows, columns, header, separator):
3132 """Formats data as a table.
3133
3134 @type rows: list of lists
3135 @param rows: Row data, one list per row
3136 @type columns: list of L{TableColumn}
3137 @param columns: Column descriptions
3138 @type header: bool
3139 @param header: Whether to show header row
3140 @type separator: string or None
3141 @param separator: String used to separate columns
3142
3143 """
3144 if header:
3145 data = [[col.title for col in columns]]
3146 colwidth = [len(col.title) for col in columns]
3147 else:
3148 data = []
3149 colwidth = [0 for _ in columns]
3150
3151 # Format row data
3152 for row in rows:
3153 assert len(row) == len(columns)
3154
3155 formatted = [col.format(value) for value, col in zip(row, columns)]
3156
3157 if separator is None:
3158 # Update column widths
3159 for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
3160 # Modifying a list's items while iterating is fine
3161 colwidth[idx] = max(oldwidth, len(value))
3162
3163 data.append(formatted)
3164
3165 if separator is not None:
3166 # Return early if a separator is used
3167 return [separator.join(row) for row in data]
3168
3169 if columns and not columns[-1].align_right:
3170 # Avoid unnecessary spaces at end of line
3171 colwidth[-1] = 0
3172
3173 # Build format string
3174 fmt = " ".join([_GetColFormatString(width, col.align_right)
3175 for col, width in zip(columns, colwidth)])
3176
3177 return [fmt % tuple(row) for row in data]
3178
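# Illustrative sketch (not executed): formatting rows with TableColumn
# descriptions; the boolean column reuses the module's _FormatBool helper.
#
#   columns = [TableColumn("Name", str, False),
#              TableColumn("Online", _FormatBool, False)]
#   rows = [["node1", True], ["node2", False]]
#   for line in FormatTable(rows, columns, True, None):
#     ToStdout(line)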
3179
3180 def FormatTimestamp(ts):
3181 """Formats a given timestamp.
3182
3183 @type ts: timestamp
3184 @param ts: a timeval-type timestamp, a tuple of seconds and microseconds
3185
3186 @rtype: string
3187 @return: a string with the formatted timestamp
3188
3189 """
3190 if not isinstance(ts, (tuple, list)) or len(ts) != 2:
3191 return "?"
3192
3193 (sec, usecs) = ts
3194 return utils.FormatTime(sec, usecs=usecs)
3195
3196
3197 def ParseTimespec(value):
3198 """Parse a time specification.
3199
3200 The following suffixes will be recognized:
3201
3202 - s: seconds
3203 - m: minutes
3204 - h: hours
3205 - d: days
3206 - w: weeks
3207
3208 Without any suffix, the value will be taken to be in seconds.
3209
3210 """
3211 value = str(value)
3212 if not value:
3213 raise errors.OpPrereqError("Empty time specification passed",
3214 errors.ECODE_INVAL)
3215 suffix_map = {
3216 "s": 1,
3217 "m": 60,
3218 "h": 3600,
3219 "d": 86400,
3220 "w": 604800,
3221 }
3222 if value[-1] not in suffix_map:
3223 try:
3224 value = int(value)
3225 except (TypeError, ValueError):
3226 raise errors.OpPrereqError("Invalid time specification '%s'" % value,
3227 errors.ECODE_INVAL)
3228 else:
3229 multiplier = suffix_map[value[-1]]
3230 value = value[:-1]
3231 if not value: # no data left after stripping the suffix
3232 raise errors.OpPrereqError("Invalid time specification (only"
3233 " suffix passed)", errors.ECODE_INVAL)
3234 try:
3235 value = int(value) * multiplier
3236 except (TypeError, ValueError):
3237 raise errors.OpPrereqError("Invalid time specification '%s'" % value,
3238 errors.ECODE_INVAL)
3239 return value
3240
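# Examples: ParseTimespec("90") returns 90, ParseTimespec("2h") returns 7200
# and ParseTimespec("1w") returns 604800; an empty value or a bare suffix
# raises OpPrereqError.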
3241
3242 def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
3243 filter_master=False, nodegroup=None):
3244 """Returns the names of online nodes.
3245
3246 This function will also log a warning on stderr with the names of
3247 the offline nodes that are skipped.
3248
3249 @param nodes: if not empty, use only this subset of nodes (minus the
3250 offline ones)
3251 @param cl: if not None, luxi client to use
3252 @type nowarn: boolean
3253 @param nowarn: by default, this function will output a note with the
3254 offline nodes that are skipped; if this parameter is True the
3255 note is not displayed
3256 @type secondary_ips: boolean
3257 @param secondary_ips: if True, return the secondary IPs instead of the
3258 names, useful for doing network traffic over the replication interface
3259 (if any)