Renew SSH keys and upgrade
[ganeti-github.git] / lib / cli.py
1 #
2 #
3
4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc.
5 # All rights reserved.
6 #
7 # Redistribution and use in source and binary forms, with or without
8 # modification, are permitted provided that the following conditions are
9 # met:
10 #
11 # 1. Redistributions of source code must retain the above copyright notice,
12 # this list of conditions and the following disclaimer.
13 #
14 # 2. Redistributions in binary form must reproduce the above copyright
15 # notice, this list of conditions and the following disclaimer in the
16 # documentation and/or other materials provided with the distribution.
17 #
18 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
19 # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
20 # TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
21 # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
22 # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
23 # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
24 # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
25 # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
26 # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
27 # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28 # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
30
31 """Module dealing with command line parsing"""
32
33
34 import sys
35 import textwrap
36 import os.path
37 import time
38 import logging
39 import errno
40 import itertools
41 import shlex
42 from cStringIO import StringIO
43
44 from ganeti import utils
45 from ganeti import errors
46 from ganeti import constants
47 from ganeti import opcodes
48 import ganeti.rpc.errors as rpcerr
49 import ganeti.rpc.node as rpc
50 from ganeti import ssh
51 from ganeti import compat
52 from ganeti import netutils
53 from ganeti import qlang
54 from ganeti import objects
55 from ganeti import pathutils
56 from ganeti import serializer
57
58 from ganeti.runtime import (GetClient)
59
60 from optparse import (OptionParser, TitledHelpFormatter,
61 Option, OptionValueError)
62
63
64 __all__ = [
65 # Command line options
66 "ABSOLUTE_OPT",
67 "ADD_UIDS_OPT",
68 "ADD_RESERVED_IPS_OPT",
69 "ALLOCATABLE_OPT",
70 "ALLOC_POLICY_OPT",
71 "ALL_OPT",
72 "ALLOW_FAILOVER_OPT",
73 "AUTO_PROMOTE_OPT",
74 "AUTO_REPLACE_OPT",
75 "BACKEND_OPT",
76 "BLK_OS_OPT",
77 "CAPAB_MASTER_OPT",
78 "CAPAB_VM_OPT",
79 "CLEANUP_OPT",
80 "CLUSTER_DOMAIN_SECRET_OPT",
81 "CONFIRM_OPT",
82 "CP_SIZE_OPT",
83 "COMPRESSION_TOOLS_OPT",
84 "DEBUG_OPT",
85 "DEBUG_SIMERR_OPT",
86 "DISKIDX_OPT",
87 "DISK_OPT",
88 "DISK_PARAMS_OPT",
89 "DISK_TEMPLATE_OPT",
90 "DRAINED_OPT",
91 "DRY_RUN_OPT",
92 "DRBD_HELPER_OPT",
93 "DST_NODE_OPT",
94 "EARLY_RELEASE_OPT",
95 "ENABLED_HV_OPT",
96 "ENABLED_DISK_TEMPLATES_OPT",
97 "ENABLED_USER_SHUTDOWN_OPT",
98 "ERROR_CODES_OPT",
99 "EXT_PARAMS_OPT",
100 "FAILURE_ONLY_OPT",
101 "FIELDS_OPT",
102 "FILESTORE_DIR_OPT",
103 "FILESTORE_DRIVER_OPT",
104 "FORCE_FAILOVER_OPT",
105 "FORCE_FILTER_OPT",
106 "FORCE_OPT",
107 "FORCE_VARIANT_OPT",
108 "GATEWAY_OPT",
109 "GATEWAY6_OPT",
110 "GLOBAL_FILEDIR_OPT",
111 "HID_OS_OPT",
112 "GLOBAL_GLUSTER_FILEDIR_OPT",
113 "GLOBAL_SHARED_FILEDIR_OPT",
114 "HOTPLUG_OPT",
115 "HOTPLUG_IF_POSSIBLE_OPT",
116 "HVLIST_OPT",
117 "HVOPTS_OPT",
118 "HYPERVISOR_OPT",
119 "IALLOCATOR_OPT",
120 "DEFAULT_IALLOCATOR_OPT",
121 "DEFAULT_IALLOCATOR_PARAMS_OPT",
122 "ENABLED_DATA_COLLECTORS_OPT",
123 "IDENTIFY_DEFAULTS_OPT",
124 "IGNORE_CONSIST_OPT",
125 "IGNORE_ERRORS_OPT",
126 "IGNORE_FAILURES_OPT",
127 "IGNORE_OFFLINE_OPT",
128 "IGNORE_REMOVE_FAILURES_OPT",
129 "IGNORE_SECONDARIES_OPT",
130 "IGNORE_SIZE_OPT",
131 "INCLUDEDEFAULTS_OPT",
132 "INTERVAL_OPT",
133 "INSTALL_IMAGE_OPT",
134 "INSTANCE_COMMUNICATION_OPT",
135 "INSTANCE_COMMUNICATION_NETWORK_OPT",
136 "MAC_PREFIX_OPT",
137 "MAINTAIN_NODE_HEALTH_OPT",
138 "MASTER_NETDEV_OPT",
139 "MASTER_NETMASK_OPT",
140 "MAX_TRACK_OPT",
141 "MC_OPT",
142 "MIGRATION_MODE_OPT",
143 "MODIFY_ETCHOSTS_OPT",
144 "NET_OPT",
145 "NETWORK_OPT",
146 "NETWORK6_OPT",
147 "NEW_CLUSTER_CERT_OPT",
148 "NEW_NODE_CERT_OPT",
149 "NEW_SSH_KEY_OPT",
150 "NEW_CLUSTER_DOMAIN_SECRET_OPT",
151 "NEW_CONFD_HMAC_KEY_OPT",
152 "NEW_RAPI_CERT_OPT",
153 "NEW_PRIMARY_OPT",
154 "NEW_SECONDARY_OPT",
155 "NEW_SPICE_CERT_OPT",
156 "NIC_PARAMS_OPT",
157 "NOCONFLICTSCHECK_OPT",
158 "NODE_FORCE_JOIN_OPT",
159 "NODE_LIST_OPT",
160 "NODE_PLACEMENT_OPT",
161 "NODEGROUP_OPT",
162 "NODE_PARAMS_OPT",
163 "NODE_POWERED_OPT",
164 "NOHDR_OPT",
165 "NOIPCHECK_OPT",
166 "NO_INSTALL_OPT",
167 "NONAMECHECK_OPT",
168 "NOMODIFY_ETCHOSTS_OPT",
169 "NOMODIFY_SSH_SETUP_OPT",
170 "NONICS_OPT",
171 "NONLIVE_OPT",
172 "NONPLUS1_OPT",
173 "NORUNTIME_CHGS_OPT",
174 "NOSHUTDOWN_OPT",
175 "NOSTART_OPT",
176 "NOSSH_KEYCHECK_OPT",
177 "NOVOTING_OPT",
178 "NO_REMEMBER_OPT",
179 "NWSYNC_OPT",
180 "OFFLINE_INST_OPT",
181 "ONLINE_INST_OPT",
182 "ON_PRIMARY_OPT",
183 "ON_SECONDARY_OPT",
184 "OFFLINE_OPT",
185 "OS_OPT",
186 "OSPARAMS_OPT",
187 "OSPARAMS_PRIVATE_OPT",
188 "OSPARAMS_SECRET_OPT",
189 "OS_SIZE_OPT",
190 "OOB_TIMEOUT_OPT",
191 "POWER_DELAY_OPT",
192 "PREALLOC_WIPE_DISKS_OPT",
193 "PRIMARY_IP_VERSION_OPT",
194 "PRIMARY_ONLY_OPT",
195 "PRINT_JOBID_OPT",
196 "PRIORITY_OPT",
197 "RAPI_CERT_OPT",
198 "READD_OPT",
199 "REASON_OPT",
200 "REBOOT_TYPE_OPT",
201 "REMOVE_INSTANCE_OPT",
202 "REMOVE_RESERVED_IPS_OPT",
203 "REMOVE_UIDS_OPT",
204 "RESERVED_LVS_OPT",
205 "RQL_OPT",
206 "RUNTIME_MEM_OPT",
207 "ROMAN_OPT",
208 "SECONDARY_IP_OPT",
209 "SECONDARY_ONLY_OPT",
210 "SELECT_OS_OPT",
211 "SEP_OPT",
212 "SHOWCMD_OPT",
213 "SHOW_MACHINE_OPT",
214 "COMPRESS_OPT",
215 "TRANSPORT_COMPRESSION_OPT",
216 "SHUTDOWN_TIMEOUT_OPT",
217 "SINGLE_NODE_OPT",
218 "SPECS_CPU_COUNT_OPT",
219 "SPECS_DISK_COUNT_OPT",
220 "SPECS_DISK_SIZE_OPT",
221 "SPECS_MEM_SIZE_OPT",
222 "SPECS_NIC_COUNT_OPT",
223 "SPLIT_ISPECS_OPTS",
224 "IPOLICY_STD_SPECS_OPT",
225 "IPOLICY_DISK_TEMPLATES",
226 "IPOLICY_VCPU_RATIO",
227 "SEQUENTIAL_OPT",
228 "SPICE_CACERT_OPT",
229 "SPICE_CERT_OPT",
230 "SRC_DIR_OPT",
231 "SRC_NODE_OPT",
232 "SUBMIT_OPT",
233 "SUBMIT_OPTS",
234 "STARTUP_PAUSED_OPT",
235 "STATIC_OPT",
236 "SYNC_OPT",
237 "TAG_ADD_OPT",
238 "TAG_SRC_OPT",
239 "TIMEOUT_OPT",
240 "TO_GROUP_OPT",
241 "UIDPOOL_OPT",
242 "USEUNITS_OPT",
243 "USE_EXTERNAL_MIP_SCRIPT",
244 "USE_REPL_NET_OPT",
245 "VERBOSE_OPT",
246 "VG_NAME_OPT",
247 "WFSYNC_OPT",
248 "YES_DOIT_OPT",
249 "ZEROING_IMAGE_OPT",
250 "ZERO_FREE_SPACE_OPT",
251 "HELPER_STARTUP_TIMEOUT_OPT",
252 "HELPER_SHUTDOWN_TIMEOUT_OPT",
253 "ZEROING_TIMEOUT_FIXED_OPT",
254 "ZEROING_TIMEOUT_PER_MIB_OPT",
255 "DISK_STATE_OPT",
256 "HV_STATE_OPT",
257 "IGNORE_IPOLICY_OPT",
258 "INSTANCE_POLICY_OPTS",
259 # Generic functions for CLI programs
260 "ConfirmOperation",
261 "CreateIPolicyFromOpts",
262 "GenericMain",
263 "GenericInstanceCreate",
264 "GenericList",
265 "GenericListFields",
266 "GetClient",
267 "GetOnlineNodes",
268 "GetNodesSshPorts",
269 "GetNodeUUIDs",
270 "JobExecutor",
271 "JobSubmittedException",
272 "ParseTimespec",
273 "RunWhileClusterStopped",
274 "SubmitOpCode",
275 "SubmitOpCodeToDrainedQueue",
276 "SubmitOrSend",
277 "UsesRPC",
278 # Formatting functions
279 "ToStderr", "ToStdout",
280 "FormatError",
281 "FormatQueryResult",
282 "FormatParamsDictInfo",
283 "FormatPolicyInfo",
284 "PrintIPolicyCommand",
285 "PrintGenericInfo",
286 "GenerateTable",
287 "AskUser",
288 "FormatTimestamp",
289 "FormatLogMessage",
290 # Tags functions
291 "ListTags",
292 "AddTags",
293 "RemoveTags",
294 # command line options support infrastructure
295 "ARGS_MANY_INSTANCES",
296 "ARGS_MANY_NODES",
297 "ARGS_MANY_GROUPS",
298 "ARGS_MANY_NETWORKS",
299 "ARGS_NONE",
300 "ARGS_ONE_INSTANCE",
301 "ARGS_ONE_NODE",
302 "ARGS_ONE_GROUP",
303 "ARGS_ONE_OS",
304 "ARGS_ONE_NETWORK",
305 "ArgChoice",
306 "ArgCommand",
307 "ArgFile",
308 "ArgGroup",
309 "ArgHost",
310 "ArgInstance",
311 "ArgJobId",
312 "ArgNetwork",
313 "ArgNode",
314 "ArgOs",
315 "ArgExtStorage",
316 "ArgSuggest",
317 "ArgUnknown",
318 "OPT_COMPL_INST_ADD_NODES",
319 "OPT_COMPL_MANY_NODES",
320 "OPT_COMPL_ONE_IALLOCATOR",
321 "OPT_COMPL_ONE_INSTANCE",
322 "OPT_COMPL_ONE_NODE",
323 "OPT_COMPL_ONE_NODEGROUP",
324 "OPT_COMPL_ONE_NETWORK",
325 "OPT_COMPL_ONE_OS",
326 "OPT_COMPL_ONE_EXTSTORAGE",
327 "cli_option",
328 "FixHvParams",
329 "SplitNodeOption",
330 "CalculateOSNames",
331 "ParseFields",
332 "COMMON_CREATE_OPTS",
333 ]
334
335 NO_PREFIX = "no_"
336 UN_PREFIX = "-"
337
338 #: Priorities (sorted)
339 _PRIORITY_NAMES = [
340 ("low", constants.OP_PRIO_LOW),
341 ("normal", constants.OP_PRIO_NORMAL),
342 ("high", constants.OP_PRIO_HIGH),
343 ]
344
345 #: Priority dictionary for easier lookup
346 # TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
347 # we migrate to Python 2.6
348 _PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)
349
350 # Query result status for clients
351 (QR_NORMAL,
352 QR_UNKNOWN,
353 QR_INCOMPLETE) = range(3)
354
355 #: Maximum batch size for ChooseJob
356 _CHOOSE_BATCH = 25
357
358
359 # constants used to create InstancePolicy dictionary
360 TISPECS_GROUP_TYPES = {
361 constants.ISPECS_MIN: constants.VTYPE_INT,
362 constants.ISPECS_MAX: constants.VTYPE_INT,
363 }
364
365 TISPECS_CLUSTER_TYPES = {
366 constants.ISPECS_MIN: constants.VTYPE_INT,
367 constants.ISPECS_MAX: constants.VTYPE_INT,
368 constants.ISPECS_STD: constants.VTYPE_INT,
369 }
370
371 #: User-friendly names for query2 field types
372 _QFT_NAMES = {
373 constants.QFT_UNKNOWN: "Unknown",
374 constants.QFT_TEXT: "Text",
375 constants.QFT_BOOL: "Boolean",
376 constants.QFT_NUMBER: "Number",
377 constants.QFT_NUMBER_FLOAT: "Floating-point number",
378 constants.QFT_UNIT: "Storage size",
379 constants.QFT_TIMESTAMP: "Timestamp",
380 constants.QFT_OTHER: "Custom",
381 }
382
383
384 class _Argument(object):
385 def __init__(self, min=0, max=None): # pylint: disable=W0622
386 self.min = min
387 self.max = max
388
389 def __repr__(self):
390 return ("<%s min=%s max=%s>" %
391 (self.__class__.__name__, self.min, self.max))
392
393
394 class ArgSuggest(_Argument):
395 """Suggesting argument.
396
397 Value can be any of the ones passed to the constructor.
398
399 """
400 # pylint: disable=W0622
401 def __init__(self, min=0, max=None, choices=None):
402 _Argument.__init__(self, min=min, max=max)
403 self.choices = choices
404
405 def __repr__(self):
406 return ("<%s min=%s max=%s choices=%r>" %
407 (self.__class__.__name__, self.min, self.max, self.choices))
408
409
410 class ArgChoice(ArgSuggest):
411 """Choice argument.
412
413 Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
414 but value must be one of the choices.
415
416 """
417
418
419 class ArgUnknown(_Argument):
420 """Unknown argument to program (e.g. determined at runtime).
421
422 """
423
424
425 class ArgInstance(_Argument):
426 """Instances argument.
427
428 """
429
430
431 class ArgNode(_Argument):
432 """Node argument.
433
434 """
435
436
437 class ArgNetwork(_Argument):
438 """Network argument.
439
440 """
441
442
443 class ArgGroup(_Argument):
444 """Node group argument.
445
446 """
447
448
449 class ArgJobId(_Argument):
450 """Job ID argument.
451
452 """
453
454
455 class ArgFile(_Argument):
456 """File path argument.
457
458 """
459
460
461 class ArgCommand(_Argument):
462 """Command argument.
463
464 """
465
466
467 class ArgHost(_Argument):
468 """Host argument.
469
470 """
471
472
473 class ArgOs(_Argument):
474 """OS argument.
475
476 """
477
478
479 class ArgExtStorage(_Argument):
480 """ExtStorage argument.
481
482 """
483
484
485 ARGS_NONE = []
486 ARGS_MANY_INSTANCES = [ArgInstance()]
487 ARGS_MANY_NETWORKS = [ArgNetwork()]
488 ARGS_MANY_NODES = [ArgNode()]
489 ARGS_MANY_GROUPS = [ArgGroup()]
490 ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
491 ARGS_ONE_NETWORK = [ArgNetwork(min=1, max=1)]
492 ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
493 ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
494 ARGS_ONE_OS = [ArgOs(min=1, max=1)]
495
496
497 def _ExtractTagsObject(opts, args):
498 """Extract the tag type object.
499
500 Note that this function will modify its args parameter.
501
502 """
503 if not hasattr(opts, "tag_type"):
504 raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
505 kind = opts.tag_type
506 if kind == constants.TAG_CLUSTER:
507 retval = kind, ""
508 elif kind in (constants.TAG_NODEGROUP,
509 constants.TAG_NODE,
510 constants.TAG_NETWORK,
511 constants.TAG_INSTANCE):
512 if not args:
513 raise errors.OpPrereqError("no arguments passed to the command",
514 errors.ECODE_INVAL)
515 name = args.pop(0)
516 retval = kind, name
517 else:
518 raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
519 return retval
520
521
522 def _ExtendTags(opts, args):
523 """Extend the args if a source file has been given.
524
525 This function will extend the tags with the contents of the file
526 passed in the 'tags_source' attribute of the opts parameter. A file
527 named '-' will be replaced by stdin.
528
529 """
530 fname = opts.tags_source
531 if fname is None:
532 return
533 if fname == "-":
534 new_fh = sys.stdin
535 else:
536 new_fh = open(fname, "r")
537 new_data = []
538 try:
539 # we don't use the nice 'new_data = [line.strip() for line in fh]'
540 # because of python bug 1633941
541 while True:
542 line = new_fh.readline()
543 if not line:
544 break
545 new_data.append(line.strip())
546 finally:
547 new_fh.close()
548 args.extend(new_data)
549
550
551 def ListTags(opts, args):
552 """List the tags on a given object.
553
554 This is a generic implementation that knows how to deal with all
555 three cases of tag objects (cluster, node, instance). The opts
556 argument is expected to contain a tag_type field denoting what
557 object type we work on.
558
559 """
560 kind, name = _ExtractTagsObject(opts, args)
561 cl = GetClient()
562 result = cl.QueryTags(kind, name)
563 result = list(result)
564 result.sort()
565 for tag in result:
566 ToStdout(tag)
567
568
569 def AddTags(opts, args):
570 """Add tags on a given object.
571
572 This is a generic implementation that knows how to deal with all
573 three cases of tag objects (cluster, node, instance). The opts
574 argument is expected to contain a tag_type field denoting what
575 object type we work on.
576
577 """
578 kind, name = _ExtractTagsObject(opts, args)
579 _ExtendTags(opts, args)
580 if not args:
581 raise errors.OpPrereqError("No tags to be added", errors.ECODE_INVAL)
582 op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
583 SubmitOrSend(op, opts)
584
585
586 def RemoveTags(opts, args):
587 """Remove tags from a given object.
588
589 This is a generic implementation that knows how to deal with all
590 three cases of tag objects (cluster, node, instance). The opts
591 argument is expected to contain a tag_type field denoting what
592 object type we work on.
593
594 """
595 kind, name = _ExtractTagsObject(opts, args)
596 _ExtendTags(opts, args)
597 if not args:
598 raise errors.OpPrereqError("No tags to be removed", errors.ECODE_INVAL)
599 op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
600 SubmitOrSend(op, opts)
601
602
603 def check_unit(option, opt, value): # pylint: disable=W0613
604 """OptParsers custom converter for units.
605
606 """
607 try:
608 return utils.ParseUnit(value)
609 except errors.UnitParseError, err:
610 raise OptionValueError("option %s: %s" % (opt, err))
611
612
613 def _SplitKeyVal(opt, data, parse_prefixes):
614 """Convert a KeyVal string into a dict.
615
616 This function will convert a key=val[,...] string into a dict. Empty
617 values will be converted specially: keys which have the prefix 'no_'
618 will have the value=False and the prefix stripped, keys with the prefix
619 "-" will have value=None and the prefix stripped, and the others will
620 have value=True.
621
622 @type opt: string
623 @param opt: a string holding the option name for which we process the
624 data, used in building error messages
625 @type data: string
626 @param data: a string of the format key=val,key=val,...
627 @type parse_prefixes: bool
628 @param parse_prefixes: whether to handle prefixes specially
629 @rtype: dict
630 @return: {key=val, key=val}
631 @raises errors.ParameterError: if there are duplicate keys
632
633 """
634 kv_dict = {}
635 if data:
636 for elem in utils.UnescapeAndSplit(data, sep=","):
637 if "=" in elem:
638 key, val = elem.split("=", 1)
639 elif parse_prefixes:
640 if elem.startswith(NO_PREFIX):
641 key, val = elem[len(NO_PREFIX):], False
642 elif elem.startswith(UN_PREFIX):
643 key, val = elem[len(UN_PREFIX):], None
644 else:
645 key, val = elem, True
646 else:
647 raise errors.ParameterError("Missing value for key '%s' in option %s" %
648 (elem, opt))
649 if key in kv_dict:
650 raise errors.ParameterError("Duplicate key '%s' in option %s" %
651 (key, opt))
652 kv_dict[key] = val
653 return kv_dict
654
655
656 def _SplitIdentKeyVal(opt, value, parse_prefixes):
657 """Helper function to parse "ident:key=val,key=val" options.
658
659 @type opt: string
660 @param opt: option name, used in error messages
661 @type value: string
662 @param value: expected to be in the format "ident:key=val,key=val,..."
663 @type parse_prefixes: bool
664 @param parse_prefixes: whether to handle prefixes specially (see
665 L{_SplitKeyVal})
666 @rtype: tuple
667 @return: (ident, {key=val, key=val})
668 @raises errors.ParameterError: in case of duplicates or other parsing errors
669
670 """
671 if ":" not in value:
672 ident, rest = value, ""
673 else:
674 ident, rest = value.split(":", 1)
675
676 if parse_prefixes and ident.startswith(NO_PREFIX):
677 if rest:
678 msg = "Cannot pass options when removing parameter groups: %s" % value
679 raise errors.ParameterError(msg)
680 retval = (ident[len(NO_PREFIX):], False)
681 elif (parse_prefixes and ident.startswith(UN_PREFIX) and
682 (len(ident) <= len(UN_PREFIX) or not ident[len(UN_PREFIX)].isdigit())):
683 if rest:
684 msg = "Cannot pass options when removing parameter groups: %s" % value
685 raise errors.ParameterError(msg)
686 retval = (ident[len(UN_PREFIX):], None)
687 else:
688 kv_dict = _SplitKeyVal(opt, rest, parse_prefixes)
689 retval = (ident, kv_dict)
690 return retval
691
692
693 def check_ident_key_val(option, opt, value): # pylint: disable=W0613
694 """Custom parser for ident:key=val,key=val options.
695
696 This will store the parsed values as a tuple (ident, {key: val}). As such,
697 multiple uses of this option via action=append is possible.
698
699 """
700 return _SplitIdentKeyVal(opt, value, True)
701
702
703 def check_key_val(option, opt, value): # pylint: disable=W0613
704 """Custom parser class for key=val,key=val options.
705
706 This will store the parsed values as a dict {key: val}.
707
708 """
709 return _SplitKeyVal(opt, value, True)
710
711
712 def check_key_private_val(option, opt, value): # pylint: disable=W0613
713 """Custom parser class for private and secret key=val,key=val options.
714
715 This will store the parsed values as a dict {key: val}.
716
717 """
718 return serializer.PrivateDict(_SplitKeyVal(opt, value, True))
719
720
721 def _SplitListKeyVal(opt, value):
722 retval = {}
723 for elem in value.split("/"):
724 if not elem:
725 raise errors.ParameterError("Empty section in option '%s'" % opt)
726 (ident, valdict) = _SplitIdentKeyVal(opt, elem, False)
727 if ident in retval:
728 msg = ("Duplicated parameter '%s' in parsing %s: %s" %
729 (ident, opt, elem))
730 raise errors.ParameterError(msg)
731 retval[ident] = valdict
732 return retval
733
734
735 def check_multilist_ident_key_val(_, opt, value):
736 """Custom parser for "ident:key=val,key=val/ident:key=val//ident:.." options.
737
738 @rtype: list of dictionary
739 @return: [{ident: {key: val, key: val}, ident: {key: val}}, {ident:..}]
740
741 """
742 retval = []
743 for line in value.split("//"):
744 retval.append(_SplitListKeyVal(opt, line))
745 return retval
746
747
748 def check_bool(option, opt, value): # pylint: disable=W0613
749 """Custom parser for yes/no options.
750
751 This will store the parsed value as either True or False.
752
753 """
754 value = value.lower()
755 if value == constants.VALUE_FALSE or value == "no":
756 return False
757 elif value == constants.VALUE_TRUE or value == "yes":
758 return True
759 else:
760 raise errors.ParameterError("Invalid boolean value '%s'" % value)
761
762
763 def check_list(option, opt, value): # pylint: disable=W0613
764 """Custom parser for comma-separated lists.
765
766 """
767 # we have to make this explicit check since "".split(",") is [""],
768 # not an empty list :(
769 if not value:
770 return []
771 else:
772 return utils.UnescapeAndSplit(value)
773
774
775 def check_maybefloat(option, opt, value): # pylint: disable=W0613
776 """Custom parser for float numbers which might be also defaults.
777
778 """
779 value = value.lower()
780
781 if value == constants.VALUE_DEFAULT:
782 return value
783 else:
784 return float(value)
785
786
787 # completion_suggestion is normally a list. Using numeric values not evaluating
788 # to False for dynamic completion.
789 (OPT_COMPL_MANY_NODES,
790 OPT_COMPL_ONE_NODE,
791 OPT_COMPL_ONE_INSTANCE,
792 OPT_COMPL_ONE_OS,
793 OPT_COMPL_ONE_EXTSTORAGE,
794 OPT_COMPL_ONE_IALLOCATOR,
795 OPT_COMPL_ONE_NETWORK,
796 OPT_COMPL_INST_ADD_NODES,
797 OPT_COMPL_ONE_NODEGROUP) = range(100, 109)
798
799 OPT_COMPL_ALL = compat.UniqueFrozenset([
800 OPT_COMPL_MANY_NODES,
801 OPT_COMPL_ONE_NODE,
802 OPT_COMPL_ONE_INSTANCE,
803 OPT_COMPL_ONE_OS,
804 OPT_COMPL_ONE_EXTSTORAGE,
805 OPT_COMPL_ONE_IALLOCATOR,
806 OPT_COMPL_ONE_NETWORK,
807 OPT_COMPL_INST_ADD_NODES,
808 OPT_COMPL_ONE_NODEGROUP,
809 ])
810
811
812 class CliOption(Option):
813 """Custom option class for optparse.
814
815 """
816 ATTRS = Option.ATTRS + [
817 "completion_suggest",
818 ]
819 TYPES = Option.TYPES + (
820 "multilistidentkeyval",
821 "identkeyval",
822 "keyval",
823 "keyprivateval",
824 "unit",
825 "bool",
826 "list",
827 "maybefloat",
828 )
829 TYPE_CHECKER = Option.TYPE_CHECKER.copy()
830 TYPE_CHECKER["multilistidentkeyval"] = check_multilist_ident_key_val
831 TYPE_CHECKER["identkeyval"] = check_ident_key_val
832 TYPE_CHECKER["keyval"] = check_key_val
833 TYPE_CHECKER["keyprivateval"] = check_key_private_val
834 TYPE_CHECKER["unit"] = check_unit
835 TYPE_CHECKER["bool"] = check_bool
836 TYPE_CHECKER["list"] = check_list
837 TYPE_CHECKER["maybefloat"] = check_maybefloat
838
839
840 # optparse.py sets make_option, so we do it for our own option class, too
841 cli_option = CliOption
842
843
844 _YORNO = "yes|no"
845
846 DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
847 help="Increase debugging level")
848
849 NOHDR_OPT = cli_option("--no-headers", default=False,
850 action="store_true", dest="no_headers",
851 help="Don't display column headers")
852
853 SEP_OPT = cli_option("--separator", default=None,
854 action="store", dest="separator",
855 help=("Separator between output fields"
856 " (defaults to one space)"))
857
858 USEUNITS_OPT = cli_option("--units", default=None,
859 dest="units", choices=("h", "m", "g", "t"),
860 help="Specify units for output (one of h/m/g/t)")
861
862 FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
863 type="string", metavar="FIELDS",
864 help="Comma separated list of output fields")
865
866 FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
867 default=False, help="Force the operation")
868
869 CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
870 default=False, help="Do not require confirmation")
871
872 IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
873 action="store_true", default=False,
874 help=("Ignore offline nodes and do as much"
875 " as possible"))
876
877 TAG_ADD_OPT = cli_option("--tags", dest="tags",
878 default=None, help="Comma-separated list of instance"
879 " tags")
880
881 TAG_SRC_OPT = cli_option("--from", dest="tags_source",
882 default=None, help="File with tag names")
883
884 SUBMIT_OPT = cli_option("--submit", dest="submit_only",
885 default=False, action="store_true",
886 help=("Submit the job and return the job ID, but"
887 " don't wait for the job to finish"))
888
889 PRINT_JOBID_OPT = cli_option("--print-jobid", dest="print_jobid",
890 default=False, action="store_true",
891 help=("Additionally print the job as first line"
892 " on stdout (for scripting)."))
893
894 SEQUENTIAL_OPT = cli_option("--sequential", dest="sequential",
895 default=False, action="store_true",
896 help=("Execute all resulting jobs sequentially"))
897
898 SYNC_OPT = cli_option("--sync", dest="do_locking",
899 default=False, action="store_true",
900 help=("Grab locks while doing the queries"
901 " in order to ensure more consistent results"))
902
903 DRY_RUN_OPT = cli_option("--dry-run", default=False,
904 action="store_true",
905 help=("Do not execute the operation, just run the"
906 " check steps and verify if it could be"
907 " executed"))
908
909 VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
910 action="store_true",
911 help="Increase the verbosity of the operation")
912
913 DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
914 action="store_true", dest="simulate_errors",
915 help="Debugging option that makes the operation"
916 " treat most runtime checks as failed")
917
918 NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
919 default=True, action="store_false",
920 help="Don't wait for sync (DANGEROUS!)")
921
922 WFSYNC_OPT = cli_option("--wait-for-sync", dest="wait_for_sync",
923 default=False, action="store_true",
924 help="Wait for disks to sync")
925
926 ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
927 action="store_true", default=False,
928 help="Enable offline instance")
929
930 OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
931 action="store_true", default=False,
932 help="Disable down instance")
933
934 DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
935 help=("Custom disk setup (%s)" %
936 utils.CommaJoin(constants.DISK_TEMPLATES)),
937 default=None, metavar="TEMPL",
938 choices=list(constants.DISK_TEMPLATES))
939
940 EXT_PARAMS_OPT = cli_option("-e", "--ext-params", dest="ext_params",
941 default={}, type="keyval",
942 help="Parameters for ExtStorage template"
943 " conversions in the format:"
944 " provider=prvdr[,param1=val1,param2=val2,...]")
945
946 NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
947 help="Do not create any network cards for"
948 " the instance")
949
950 FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
951 help="Relative path under default cluster-wide"
952 " file storage dir to store file-based disks",
953 default=None, metavar="<DIR>")
954
955 FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
956 help="Driver to use for image files",
957 default=None, metavar="<DRIVER>",
958 choices=list(constants.FILE_DRIVER))
959
960 IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
961 help="Select nodes for the instance automatically"
962 " using the <NAME> iallocator plugin",
963 default=None, type="string",
964 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
965
966 DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
967 metavar="<NAME>",
968 help="Set the default instance"
969 " allocator plugin",
970 default=None, type="string",
971 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
972
973 DEFAULT_IALLOCATOR_PARAMS_OPT = cli_option("--default-iallocator-params",
974 dest="default_iallocator_params",
975 help="iallocator template"
976 " parameters, in the format"
977 " template:option=value,"
978 " option=value,...",
979 type="keyval",
980 default={})
981
982 OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
983 metavar="<os>",
984 completion_suggest=OPT_COMPL_ONE_OS)
985
986 OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
987 type="keyval", default={},
988 help="OS parameters")
989
990 OSPARAMS_PRIVATE_OPT = cli_option("--os-parameters-private",
991 dest="osparams_private",
992 type="keyprivateval",
993 default=serializer.PrivateDict(),
994 help="Private OS parameters"
995 " (won't be logged)")
996
997 OSPARAMS_SECRET_OPT = cli_option("--os-parameters-secret",
998 dest="osparams_secret",
999 type="keyprivateval",
1000 default=serializer.PrivateDict(),
1001 help="Secret OS parameters (won't be logged or"
1002 " saved; you must supply these for every"
1003 " operation.)")
1004
1005 FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
1006 action="store_true", default=False,
1007 help="Force an unknown variant")
1008
1009 NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
1010 action="store_true", default=False,
1011 help="Do not install the OS (will"
1012 " enable no-start)")
1013
1014 NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes",
1015 dest="allow_runtime_chgs",
1016 default=True, action="store_false",
1017 help="Don't allow runtime changes")
1018
1019 BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
1020 type="keyval", default={},
1021 help="Backend parameters")
1022
1023 HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
1024 default={}, dest="hvparams",
1025 help="Hypervisor parameters")
1026
1027 DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
1028 help="Disk template parameters, in the format"
1029 " template:option=value,option=value,...",
1030 type="identkeyval", action="append", default=[])
1031
1032 SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
1033 type="keyval", default={},
1034 help="Memory size specs: list of key=value,"
1035 " where key is one of min, max, std"
1036 " (in MB or using a unit)")
1037
1038 SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
1039 type="keyval", default={},
1040 help="CPU count specs: list of key=value,"
1041 " where key is one of min, max, std")
1042
1043 SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
1044 dest="ispecs_disk_count",
1045 type="keyval", default={},
1046 help="Disk count specs: list of key=value,"
1047 " where key is one of min, max, std")
1048
1049 SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
1050 type="keyval", default={},
1051 help="Disk size specs: list of key=value,"
1052 " where key is one of min, max, std"
1053 " (in MB or using a unit)")
1054
1055 SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
1056 type="keyval", default={},
1057 help="NIC count specs: list of key=value,"
1058 " where key is one of min, max, std")
1059
1060 IPOLICY_BOUNDS_SPECS_STR = "--ipolicy-bounds-specs"
1061 IPOLICY_BOUNDS_SPECS_OPT = cli_option(IPOLICY_BOUNDS_SPECS_STR,
1062 dest="ipolicy_bounds_specs",
1063 type="multilistidentkeyval", default=None,
1064 help="Complete instance specs limits")
1065
1066 IPOLICY_STD_SPECS_STR = "--ipolicy-std-specs"
1067 IPOLICY_STD_SPECS_OPT = cli_option(IPOLICY_STD_SPECS_STR,
1068 dest="ipolicy_std_specs",
1069 type="keyval", default=None,
1070 help="Complete standard instance specs")
1071
1072 IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
1073 dest="ipolicy_disk_templates",
1074 type="list", default=None,
1075 help="Comma-separated list of"
1076 " enabled disk templates")
1077
1078 IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
1079 dest="ipolicy_vcpu_ratio",
1080 type="maybefloat", default=None,
1081 help="The maximum allowed vcpu-to-cpu ratio")
1082
1083 IPOLICY_SPINDLE_RATIO = cli_option("--ipolicy-spindle-ratio",
1084 dest="ipolicy_spindle_ratio",
1085 type="maybefloat", default=None,
1086 help=("The maximum allowed instances to"
1087 " spindle ratio"))
1088
1089 HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
1090 help="Hypervisor and hypervisor options, in the"
1091 " format hypervisor:option=value,option=value,...",
1092 default=None, type="identkeyval")
1093
1094 HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
1095 help="Hypervisor and hypervisor options, in the"
1096 " format hypervisor:option=value,option=value,...",
1097 default=[], action="append", type="identkeyval")
1098
1099 NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
1100 action="store_false",
1101 help="Don't check that the instance's IP"
1102 " is alive")
1103
1104 NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
1105 default=True, action="store_false",
1106 help="Don't check that the instance's name"
1107 " is resolvable")
1108
1109 NET_OPT = cli_option("--net",
1110 help="NIC parameters", default=[],
1111 dest="nics", action="append", type="identkeyval")
1112
1113 DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
1114 dest="disks", action="append", type="identkeyval")
1115
1116 DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
1117 help="Comma-separated list of disks"
1118 " indices to act on (e.g. 0,2) (optional,"
1119 " defaults to all disks)")
1120
1121 OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
1122 help="Enforces a single-disk configuration using the"
1123 " given disk size, in MiB unless a suffix is used",
1124 default=None, type="unit", metavar="<size>")
1125
1126 IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
1127 dest="ignore_consistency",
1128 action="store_true", default=False,
1129 help="Ignore the consistency of the disks on"
1130 " the secondary")
1131
1132 ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
1133 dest="allow_failover",
1134 action="store_true", default=False,
1135 help="If migration is not possible fallback to"
1136 " failover")
1137
1138 FORCE_FAILOVER_OPT = cli_option("--force-failover",
1139 dest="force_failover",
1140 action="store_true", default=False,
1141 help="Do not use migration, always use"
1142 " failover")
1143
1144 NONLIVE_OPT = cli_option("--non-live", dest="live",
1145 default=True, action="store_false",
1146 help="Do a non-live migration (this usually means"
1147 " freeze the instance, save the state, transfer and"
1148 " only then resume running on the secondary node)")
1149
1150 MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
1151 default=None,
1152 choices=list(constants.HT_MIGRATION_MODES),
1153 help="Override default migration mode (choose"
1154 " either live or non-live")
1155
1156 NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
1157 help="Target node and optional secondary node",
1158 metavar="<pnode>[:<snode>]",
1159 completion_suggest=OPT_COMPL_INST_ADD_NODES)
1160
1161 NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
1162 action="append", metavar="<node>",
1163 help="Use only this node (can be used multiple"
1164 " times, if not given defaults to all nodes)",
1165 completion_suggest=OPT_COMPL_ONE_NODE)
1166
1167 NODEGROUP_OPT_NAME = "--node-group"
1168 NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
1169 dest="nodegroup",
1170 help="Node group (name or uuid)",
1171 metavar="<nodegroup>",
1172 default=None, type="string",
1173 completion_suggest=OPT_COMPL_ONE_NODEGROUP)
1174
1175 SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
1176 metavar="<node>",
1177 completion_suggest=OPT_COMPL_ONE_NODE)
1178
1179 NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
1180 action="store_false",
1181 help="Don't start the instance after creation")
1182
1183 SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
1184 action="store_true", default=False,
1185 help="Show command instead of executing it")
1186
1187 CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
1188 default=False, action="store_true",
1189 help="Instead of performing the migration/failover,"
1190 " try to recover from a failed cleanup. This is safe"
1191 " to run even if the instance is healthy, but it"
1192 " will create extra replication traffic and "
1193 " disrupt briefly the replication (like during the"
1194 " migration/failover")
1195
1196 STATIC_OPT = cli_option("-s", "--static", dest="static",
1197 action="store_true", default=False,
1198 help="Only show configuration data, not runtime data")
1199
1200 ALL_OPT = cli_option("--all", dest="show_all",
1201 default=False, action="store_true",
1202 help="Show info on all instances on the cluster."
1203 " This can take a long time to run, use wisely")
1204
1205 SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
1206 action="store_true", default=False,
1207 help="Interactive OS reinstall, lists available"
1208 " OS templates for selection")
1209
1210 IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
1211 action="store_true", default=False,
1212 help="Remove the instance from the cluster"
1213 " configuration even if there are failures"
1214 " during the removal process")
1215
1216 IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
1217 dest="ignore_remove_failures",
1218 action="store_true", default=False,
1219 help="Remove the instance from the"
1220 " cluster configuration even if there"
1221 " are failures during the removal"
1222 " process")
1223
1224 REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
1225 action="store_true", default=False,
1226 help="Remove the instance from the cluster")
1227
1228 DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
1229 help="Specifies the new node for the instance",
1230 metavar="NODE", default=None,
1231 completion_suggest=OPT_COMPL_ONE_NODE)
1232
1233 NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
1234 help="Specifies the new secondary node",
1235 metavar="NODE", default=None,
1236 completion_suggest=OPT_COMPL_ONE_NODE)
1237
1238 NEW_PRIMARY_OPT = cli_option("--new-primary", dest="new_primary_node",
1239 help="Specifies the new primary node",
1240 metavar="<node>", default=None,
1241 completion_suggest=OPT_COMPL_ONE_NODE)
1242
1243 ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
1244 default=False, action="store_true",
1245 help="Replace the disk(s) on the primary"
1246 " node (applies only to internally mirrored"
1247 " disk templates, e.g. %s)" %
1248 utils.CommaJoin(constants.DTS_INT_MIRROR))
1249
1250 ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
1251 default=False, action="store_true",
1252 help="Replace the disk(s) on the secondary"
1253 " node (applies only to internally mirrored"
1254 " disk templates, e.g. %s)" %
1255 utils.CommaJoin(constants.DTS_INT_MIRROR))
1256
1257 AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
1258 default=False, action="store_true",
1259 help="Lock all nodes and auto-promote as needed"
1260 " to MC status")
1261
1262 AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
1263 default=False, action="store_true",
1264 help="Automatically replace faulty disks"
1265 " (applies only to internally mirrored"
1266 " disk templates, e.g. %s)" %
1267 utils.CommaJoin(constants.DTS_INT_MIRROR))
1268
1269 IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
1270 default=False, action="store_true",
1271 help="Ignore current recorded size"
1272 " (useful for forcing activation when"
1273 " the recorded size is wrong)")
1274
1275 SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
1276 metavar="<node>",
1277 completion_suggest=OPT_COMPL_ONE_NODE)
1278
1279 SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
1280 metavar="<dir>")
1281
1282 SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
1283 help="Specify the secondary ip for the node",
1284 metavar="ADDRESS", default=None)
1285
1286 READD_OPT = cli_option("--readd", dest="readd",
1287 default=False, action="store_true",
1288 help="Readd old node after replacing it")
1289
1290 NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
1291 default=True, action="store_false",
1292 help="Disable SSH key fingerprint checking")
1293
1294 NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
1295 default=False, action="store_true",
1296 help="Force the joining of a node")
1297
1298 MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
1299 type="bool", default=None, metavar=_YORNO,
1300 help="Set the master_candidate flag on the node")
1301
1302 OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
1303 type="bool", default=None,
1304 help=("Set the offline flag on the node"
1305 " (cluster does not communicate with offline"
1306 " nodes)"))
1307
1308 DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
1309 type="bool", default=None,
1310 help=("Set the drained flag on the node"
1311 " (excluded from allocation operations)"))
1312
1313 CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
1314 type="bool", default=None, metavar=_YORNO,
1315 help="Set the master_capable flag on the node")
1316
1317 CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
1318 type="bool", default=None, metavar=_YORNO,
1319 help="Set the vm_capable flag on the node")
1320
1321 ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
1322 type="bool", default=None, metavar=_YORNO,
1323 help="Set the allocatable flag on a volume")
1324
1325 ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
1326 dest="enabled_hypervisors",
1327 help="Comma-separated list of hypervisors",
1328 type="string", default=None)
1329
1330 ENABLED_DISK_TEMPLATES_OPT = cli_option("--enabled-disk-templates",
1331 dest="enabled_disk_templates",
1332 help="Comma-separated list of "
1333 "disk templates",
1334 type="string", default=None)
1335
1336 ENABLED_USER_SHUTDOWN_OPT = cli_option("--user-shutdown",
1337 default=None,
1338 dest="enabled_user_shutdown",
1339 help="Whether user shutdown is enabled",
1340 type="bool")
1341
1342 NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
1343 type="keyval", default={},
1344 help="NIC parameters")
1345
1346 CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
1347 dest="candidate_pool_size", type="int",
1348 help="Set the candidate pool size")
1349
1350 RQL_OPT = cli_option("--max-running-jobs", dest="max_running_jobs",
1351 type="int", help="Set the maximal number of jobs to "
1352 "run simultaneously")
1353
1354 MAX_TRACK_OPT = cli_option("--max-tracked-jobs", dest="max_tracked_jobs",
1355 type="int", help="Set the maximal number of jobs to "
1356 "be tracked simultaneously for "
1357 "scheduling")
1358
1359 COMPRESSION_TOOLS_OPT = \
1360 cli_option("--compression-tools",
1361 dest="compression_tools", type="string", default=None,
1362 help="Comma-separated list of compression tools which are"
1363 " allowed to be used by Ganeti in various operations")
1364
1365 VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
1366 help=("Enables LVM and specifies the volume group"
1367 " name (cluster-wide) for disk allocation"
1368 " [%s]" % constants.DEFAULT_VG),
1369 metavar="VG", default=None)
1370
1371 YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
1372 help="Destroy cluster", action="store_true")
1373
1374 NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
1375 help="Skip node agreement check (dangerous)",
1376 action="store_true", default=False)
1377
1378 MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
1379 help="Specify the mac prefix for the instance IP"
1380 " addresses, in the format XX:XX:XX",
1381 metavar="PREFIX",
1382 default=None)
1383
1384 MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
1385 help="Specify the node interface (cluster-wide)"
1386 " on which the master IP address will be added"
1387 " (cluster init default: %s)" %
1388 constants.DEFAULT_BRIDGE,
1389 metavar="NETDEV",
1390 default=None)
1391
1392 MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
1393 help="Specify the netmask of the master IP",
1394 metavar="NETMASK",
1395 default=None)
1396
1397 USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
1398 dest="use_external_mip_script",
1399 help="Specify whether to run a"
1400 " user-provided script for the master"
1401 " IP address turnup and"
1402 " turndown operations",
1403 type="bool", metavar=_YORNO, default=None)
1404
1405 GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
1406 help="Specify the default directory (cluster-"
1407 "wide) for storing the file-based disks [%s]" %
1408 pathutils.DEFAULT_FILE_STORAGE_DIR,
1409 metavar="DIR",
1410 default=None)
1411
1412 GLOBAL_SHARED_FILEDIR_OPT = cli_option(
1413 "--shared-file-storage-dir",
1414 dest="shared_file_storage_dir",
1415 help="Specify the default directory (cluster-wide) for storing the"
1416 " shared file-based disks [%s]" %
1417 pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR,
1418 metavar="SHAREDDIR", default=None)
1419
1420 GLOBAL_GLUSTER_FILEDIR_OPT = cli_option(
1421 "--gluster-storage-dir",
1422 dest="gluster_storage_dir",
1423 help="Specify the default directory (cluster-wide) for mounting Gluster"
1424 " file systems [%s]" %
1425 pathutils.DEFAULT_GLUSTER_STORAGE_DIR,
1426 metavar="GLUSTERDIR",
1427 default=pathutils.DEFAULT_GLUSTER_STORAGE_DIR)
1428
1429 NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
1430 help="Don't modify %s" % pathutils.ETC_HOSTS,
1431 action="store_false", default=True)
1432
1433 MODIFY_ETCHOSTS_OPT = \
1434 cli_option("--modify-etc-hosts", dest="modify_etc_hosts", metavar=_YORNO,
1435 default=None, type="bool",
1436 help="Defines whether the cluster should autonomously modify"
1437 " and keep in sync the /etc/hosts file of the nodes")
1438
1439 NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
1440 help="Don't initialize SSH keys",
1441 action="store_false", default=True)
1442
1443 ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
1444 help="Enable parseable error messages",
1445 action="store_true", default=False)
1446
1447 NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
1448 help="Skip N+1 memory redundancy tests",
1449 action="store_true", default=False)
1450
1451 REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
1452 help="Type of reboot: soft/hard/full",
1453 default=constants.INSTANCE_REBOOT_HARD,
1454 metavar="<REBOOT>",
1455 choices=list(constants.REBOOT_TYPES))
1456
1457 IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
1458 dest="ignore_secondaries",
1459 default=False, action="store_true",
1460 help="Ignore errors from secondaries")
1461
1462 NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
1463 action="store_false", default=True,
1464 help="Don't shutdown the instance (unsafe)")
1465
1466 TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
1467 default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1468 help="Maximum time to wait")
1469
1470 COMPRESS_OPT = cli_option("--compress", dest="compress",
1471 type="string", default=constants.IEC_NONE,
1472 help="The compression mode to use")
1473
1474 TRANSPORT_COMPRESSION_OPT = \
1475 cli_option("--transport-compression", dest="transport_compression",
1476 type="string", default=constants.IEC_NONE,
1477 help="The compression mode to use during transport")
1478
1479 SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
1480 dest="shutdown_timeout", type="int",
1481 default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1482 help="Maximum time to wait for instance"
1483 " shutdown")
1484
1485 INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
1486 default=None,
1487 help=("Number of seconds between repetions of the"
1488 " command"))
1489
1490 EARLY_RELEASE_OPT = cli_option("--early-release",
1491 dest="early_release", default=False,
1492 action="store_true",
1493 help="Release the locks on the secondary"
1494 " node(s) early")
1495
1496 NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
1497 dest="new_cluster_cert",
1498 default=False, action="store_true",
1499 help="Generate a new cluster certificate")
1500
1501 NEW_NODE_CERT_OPT = cli_option(
1502 "--new-node-certificates", dest="new_node_cert", default=False,
1503 action="store_true", help="Generate new node certificates (for all nodes)")
1504
1505 NEW_SSH_KEY_OPT = cli_option(
1506 "--new-ssh-keys", dest="new_ssh_keys", default=False,
1507 action="store_true", help="Generate new node SSH keys (for all nodes)")
1508
1509 RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
1510 default=None,
1511 help="File containing new RAPI certificate")
1512
1513 NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
1514 default=None, action="store_true",
1515 help=("Generate a new self-signed RAPI"
1516 " certificate"))
1517
1518 SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
1519 default=None,
1520 help="File containing new SPICE certificate")
1521
1522 SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
1523 default=None,
1524 help="File containing the certificate of the CA"
1525 " which signed the SPICE certificate")
1526
1527 NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
1528 dest="new_spice_cert", default=None,
1529 action="store_true",
1530 help=("Generate a new self-signed SPICE"
1531 " certificate"))
1532
1533 NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
1534 dest="new_confd_hmac_key",
1535 default=False, action="store_true",
1536 help=("Create a new HMAC key for %s" %
1537 constants.CONFD))
1538
1539 CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
1540 dest="cluster_domain_secret",
1541 default=None,
1542 help=("Load new new cluster domain"
1543 " secret from file"))
1544
1545 NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
1546 dest="new_cluster_domain_secret",
1547 default=False, action="store_true",
1548 help=("Create a new cluster domain"
1549 " secret"))
1550
1551 USE_REPL_NET_OPT = cli_option("--use-replication-network",
1552 dest="use_replication_network",
1553 help="Whether to use the replication network"
1554 " for talking to the nodes",
1555 action="store_true", default=False)
1556
1557 MAINTAIN_NODE_HEALTH_OPT = \
1558 cli_option("--maintain-node-health", dest="maintain_node_health",
1559 metavar=_YORNO, default=None, type="bool",
1560 help="Configure the cluster to automatically maintain node"
1561 " health, by shutting down unknown instances, shutting down"
1562 " unknown DRBD devices, etc.")
1563
1564 IDENTIFY_DEFAULTS_OPT = \
1565 cli_option("--identify-defaults", dest="identify_defaults",
1566 default=False, action="store_true",
1567 help="Identify which saved instance parameters are equal to"
1568 " the current cluster defaults and set them as such, instead"
1569 " of marking them as overridden")
1570
1571 UIDPOOL_OPT = cli_option("--uid-pool", default=None,
1572 action="store", dest="uid_pool",
1573 help=("A list of user-ids or user-id"
1574 " ranges separated by commas"))
1575
1576 ADD_UIDS_OPT = cli_option("--add-uids", default=None,
1577 action="store", dest="add_uids",
1578 help=("A list of user-ids or user-id"
1579 " ranges separated by commas, to be"
1580 " added to the user-id pool"))
1581
1582 REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
1583 action="store", dest="remove_uids",
1584 help=("A list of user-ids or user-id"
1585 " ranges separated by commas, to be"
1586 " removed from the user-id pool"))
1587
1588 RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
1589 action="store", dest="reserved_lvs",
1590 help=("A comma-separated list of reserved"
1591 " logical volumes names, that will be"
1592 " ignored by cluster verify"))
1593
1594 ROMAN_OPT = cli_option("--roman",
1595 dest="roman_integers", default=False,
1596 action="store_true",
1597 help="Use roman numbers for positive integers")
1598
1599 DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
1600 action="store", default=None,
1601 help="Specifies usermode helper for DRBD")
1602
1603 PRIMARY_IP_VERSION_OPT = \
1604 cli_option("--primary-ip-version", default=constants.IP4_VERSION,
1605 action="store", dest="primary_ip_version",
1606 metavar="%d|%d" % (constants.IP4_VERSION,
1607 constants.IP6_VERSION),
1608 help="Cluster-wide IP version for primary IP")
1609
1610 SHOW_MACHINE_OPT = cli_option("-M", "--show-machine-names", default=False,
1611 action="store_true",
1612 help="Show machine name for every line in output")
1613
1614 FAILURE_ONLY_OPT = cli_option("--failure-only", default=False,
1615 action="store_true",
1616 help=("Hide successful results and show failures"
1617 " only (determined by the exit code)"))
1618
1619 REASON_OPT = cli_option("--reason", default=None,
1620 help="The reason for executing the command")
1621
1622
1623 def _PriorityOptionCb(option, _, value, parser):
1624 """Callback for processing C{--priority} option.
1625
1626 """
1627 value = _PRIONAME_TO_VALUE[value]
1628
1629 setattr(parser.values, option.dest, value)
1630
1631
1632 PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
1633 metavar="|".join(name for name, _ in _PRIORITY_NAMES),
1634 choices=_PRIONAME_TO_VALUE.keys(),
1635 action="callback", type="choice",
1636 callback=_PriorityOptionCb,
1637 help="Priority for opcode processing")
1638
1639 OPPORTUNISTIC_OPT = cli_option("--opportunistic-locking",
1640 dest="opportunistic_locking",
1641 action="store_true", default=False,
1642 help="Opportunistically acquire locks")
1643
1644 HID_OS_OPT = cli_option("--hidden", dest="hidden",
1645 type="bool", default=None, metavar=_YORNO,
1646 help="Sets the hidden flag on the OS")
1647
1648 BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
1649 type="bool", default=None, metavar=_YORNO,
1650 help="Sets the blacklisted flag on the OS")
1651
1652 PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
1653 type="bool", metavar=_YORNO,
1654 dest="prealloc_wipe_disks",
1655 help=("Wipe disks prior to instance"
1656 " creation"))
1657
1658 NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
1659 type="keyval", default=None,
1660 help="Node parameters")
1661
1662 ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
1663 action="store", metavar="POLICY", default=None,
1664 help="Allocation policy for the node group")
1665
1666 NODE_POWERED_OPT = cli_option("--node-powered", default=None,
1667 type="bool", metavar=_YORNO,
1668 dest="node_powered",
1669 help="Specify if the SoR for node is powered")
1670
1671 OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
1672 default=constants.OOB_TIMEOUT,
1673 help="Maximum time to wait for out-of-band helper")
1674
1675 POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
1676 default=constants.OOB_POWER_DELAY,
1677 help="Time in seconds to wait between power-ons")
1678
1679 FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
1680 action="store_true", default=False,
1681 help=("Whether command argument should be treated"
1682 " as filter"))
1683
1684 NO_REMEMBER_OPT = cli_option("--no-remember",
1685 dest="no_remember",
1686 action="store_true", default=False,
1687 help="Perform but do not record the change"
1688 " in the configuration")
1689
1690 PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
1691 default=False, action="store_true",
1692 help="Evacuate primary instances only")
1693
1694 SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
1695 default=False, action="store_true",
1696 help="Evacuate secondary instances only"
1697 " (applies only to internally mirrored"
1698 " disk templates, e.g. %s)" %
1699 utils.CommaJoin(constants.DTS_INT_MIRROR))
1700
1701 STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
1702 action="store_true", default=False,
1703 help="Pause instance at startup")
1704
1705 TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
1706 help="Destination node group (name or uuid)",
1707 default=None, action="append",
1708 completion_suggest=OPT_COMPL_ONE_NODEGROUP)
1709
1710 IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
1711 action="append", dest="ignore_errors",
1712 choices=list(constants.CV_ALL_ECODES_STRINGS),
1713 help="Error code to be ignored")
1714
1715 DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
1716 action="append",
1717 help=("Specify disk state information in the"
1718 " format"
1719 " storage_type/identifier:option=value,...;"
1720 " note this is unused for now"),
1721 type="identkeyval")
1722
1723 HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
1724 action="append",
1725 help=("Specify hypervisor state information in the"
1726 " format hypervisor:option=value,...;"
1727 " note this is unused for now"),
1728 type="identkeyval")
1729
1730 IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
1731 action="store_true", default=False,
1732 help="Ignore instance policy violations")
1733
1734 RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
1735 help="Sets the instance's runtime memory,"
1736 " ballooning it up or down to the new value",
1737 default=None, type="unit", metavar="<size>")
1738
1739 ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
1740 action="store_true", default=False,
1741 help="Marks the grow as absolute instead of the"
1742 " (default) relative mode")
1743
1744 NETWORK_OPT = cli_option("--network",
1745 action="store", default=None, dest="network",
1746 help="IP network in CIDR notation")
1747
1748 GATEWAY_OPT = cli_option("--gateway",
1749 action="store", default=None, dest="gateway",
1750 help="IP address of the router (gateway)")
1751
1752 ADD_RESERVED_IPS_OPT = cli_option("--add-reserved-ips",
1753 action="store", default=None,
1754 dest="add_reserved_ips",
1755 help="Comma-separated list of"
1756 " reserved IPs to add")
1757
1758 REMOVE_RESERVED_IPS_OPT = cli_option("--remove-reserved-ips",
1759 action="store", default=None,
1760 dest="remove_reserved_ips",
1761 help="Comma-delimited list of"
1762 " reserved IPs to remove")
1763
1764 NETWORK6_OPT = cli_option("--network6",
1765 action="store", default=None, dest="network6",
1766 help="IP network in CIDR notation")
1767
1768 GATEWAY6_OPT = cli_option("--gateway6",
1769 action="store", default=None, dest="gateway6",
1770 help="IP6 address of the router (gateway)")
1771
1772 NOCONFLICTSCHECK_OPT = cli_option("--no-conflicts-check",
1773 dest="conflicts_check",
1774 default=True,
1775 action="store_false",
1776 help="Don't check for conflicting IPs")
1777
1778 INCLUDEDEFAULTS_OPT = cli_option("--include-defaults", dest="include_defaults",
1779 default=False, action="store_true",
1780 help="Include default values")
1781
1782 HOTPLUG_OPT = cli_option("--hotplug", dest="hotplug",
1783 action="store_true", default=False,
1784 help="Hotplug supported devices (NICs and Disks)")
1785
1786 HOTPLUG_IF_POSSIBLE_OPT = cli_option("--hotplug-if-possible",
1787 dest="hotplug_if_possible",
1788 action="store_true", default=False,
1789 help="Hotplug devices in case"
1790 " hotplug is supported")
1791
1792 INSTALL_IMAGE_OPT = \
1793 cli_option("--install-image",
1794 dest="install_image",
1795 action="store",
1796 type="string",
1797 default=None,
1798 help="The OS image to use for running the OS scripts safely")
1799
1800 INSTANCE_COMMUNICATION_OPT = \
1801 cli_option("-c", "--communication",
1802 dest="instance_communication",
1803 help=constants.INSTANCE_COMMUNICATION_DOC,
1804 type="bool")
1805
1806 INSTANCE_COMMUNICATION_NETWORK_OPT = \
1807 cli_option("--instance-communication-network",
1808 dest="instance_communication_network",
1809 type="string",
1810 help="Set the network name for instance communication")
1811
1812 ZEROING_IMAGE_OPT = \
1813 cli_option("--zeroing-image",
1814 dest="zeroing_image", action="store", default=None,
1815 help="The OS image to use to zero instance disks")
1816
1817 ZERO_FREE_SPACE_OPT = \
1818 cli_option("--zero-free-space",
1819 dest="zero_free_space", action="store_true", default=False,
1820 help="Whether to zero the free space on the disks of the "
1821 "instance prior to the export")
1822
1823 HELPER_STARTUP_TIMEOUT_OPT = \
1824 cli_option("--helper-startup-timeout",
1825 dest="helper_startup_timeout", action="store", type="int",
1826 help="Startup timeout for the helper VM")
1827
1828 HELPER_SHUTDOWN_TIMEOUT_OPT = \
1829 cli_option("--helper-shutdown-timeout",
1830 dest="helper_shutdown_timeout", action="store", type="int",
1831 help="Shutdown timeout for the helper VM")
1832
1833 ZEROING_TIMEOUT_FIXED_OPT = \
1834 cli_option("--zeroing-timeout-fixed",
1835 dest="zeroing_timeout_fixed", action="store", type="int",
1836 help="The fixed amount of time to wait before assuming that the "
1837 "zeroing failed")
1838
1839 ZEROING_TIMEOUT_PER_MIB_OPT = \
1840 cli_option("--zeroing-timeout-per-mib",
1841 dest="zeroing_timeout_per_mib", action="store", type="float",
1842 help="The amount of time to wait per MiB of data to zero, in "
1843 "addition to the fixed timeout")
1844
1845 ENABLED_DATA_COLLECTORS_OPT = \
1846 cli_option("--enabled-data-collectors",
1847 dest="enabled_data_collectors", type="keyval",
1848 default={},
1849 help="Deactivate or reactivate a data collector for reporting, "
1850 "in the format collector=bool, where collector is one of %s."
1851 % ", ".join(constants.DATA_COLLECTOR_NAMES))
1852
1853
1854 #: Options provided by all commands
1855 COMMON_OPTS = [DEBUG_OPT, REASON_OPT]
1856
1857 # options related to asynchronous job handling
1858
1859 SUBMIT_OPTS = [
1860 SUBMIT_OPT,
1861 PRINT_JOBID_OPT,
1862 ]
1863
1864 # common options for creating instances. add and import then add their own
1865 # specific ones.
1866 COMMON_CREATE_OPTS = [
1867 BACKEND_OPT,
1868 DISK_OPT,
1869 DISK_TEMPLATE_OPT,
1870 FILESTORE_DIR_OPT,
1871 FILESTORE_DRIVER_OPT,
1872 HYPERVISOR_OPT,
1873 IALLOCATOR_OPT,
1874 NET_OPT,
1875 NODE_PLACEMENT_OPT,
1876 NODEGROUP_OPT,
1877 NOIPCHECK_OPT,
1878 NOCONFLICTSCHECK_OPT,
1879 NONAMECHECK_OPT,
1880 NONICS_OPT,
1881 NWSYNC_OPT,
1882 OSPARAMS_OPT,
1883 OSPARAMS_PRIVATE_OPT,
1884 OSPARAMS_SECRET_OPT,
1885 OS_SIZE_OPT,
1886 OPPORTUNISTIC_OPT,
1887 SUBMIT_OPT,
1888 PRINT_JOBID_OPT,
1889 TAG_ADD_OPT,
1890 DRY_RUN_OPT,
1891 PRIORITY_OPT,
1892 ]
1893
1894 # common instance policy options
1895 INSTANCE_POLICY_OPTS = [
1896 IPOLICY_BOUNDS_SPECS_OPT,
1897 IPOLICY_DISK_TEMPLATES,
1898 IPOLICY_VCPU_RATIO,
1899 IPOLICY_SPINDLE_RATIO,
1900 ]
1901
1902 # instance policy split specs options
1903 SPLIT_ISPECS_OPTS = [
1904 SPECS_CPU_COUNT_OPT,
1905 SPECS_DISK_COUNT_OPT,
1906 SPECS_DISK_SIZE_OPT,
1907 SPECS_MEM_SIZE_OPT,
1908 SPECS_NIC_COUNT_OPT,
1909 ]
1910
1911
1912 class _ShowUsage(Exception):
1913 """Exception class for L{_ParseArgs}.
1914
1915 """
1916 def __init__(self, exit_error):
1917 """Initializes instances of this class.
1918
1919 @type exit_error: bool
1920 @param exit_error: Whether to report failure on exit
1921
1922 """
1923 Exception.__init__(self)
1924 self.exit_error = exit_error
1925
1926
1927 class _ShowVersion(Exception):
1928 """Exception class for L{_ParseArgs}.
1929
1930 """
1931
1932
1933 def _ParseArgs(binary, argv, commands, aliases, env_override):
1934 """Parser for the command line arguments.
1935
1936 This function parses the arguments and returns the function which
1937 must be executed together with its (modified) arguments.
1938
1939 @param binary: Script name
1940 @param argv: Command line arguments
1941 @param commands: Dictionary containing command definitions
1942 @param aliases: dictionary with command aliases {"alias": "target", ...}
1943 @param env_override: list of env variables allowed for default args
1944 @raise _ShowUsage: If usage description should be shown
1945 @raise _ShowVersion: If version should be shown
1946
1947 """
1948 assert not (env_override - set(commands))
1949 assert not (set(aliases.keys()) & set(commands.keys()))
1950
1951 if len(argv) > 1:
1952 cmd = argv[1]
1953 else:
1954 # No option or command given
1955 raise _ShowUsage(exit_error=True)
1956
1957 if cmd == "--version":
1958 raise _ShowVersion()
1959 elif cmd == "--help":
1960 raise _ShowUsage(exit_error=False)
1961 elif not (cmd in commands or cmd in aliases):
1962 raise _ShowUsage(exit_error=True)
1963
1964 # get command, unalias it, and look it up in commands
1965 if cmd in aliases:
1966 if aliases[cmd] not in commands:
1967 raise errors.ProgrammerError("Alias '%s' maps to non-existing"
1968 " command '%s'" % (cmd, aliases[cmd]))
1969
1970 cmd = aliases[cmd]
1971
1972 if cmd in env_override:
1973 args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
1974 env_args = os.environ.get(args_env_name)
1975 if env_args:
1976 argv = utils.InsertAtPos(argv, 2, shlex.split(env_args))
1977
1978 func, args_def, parser_opts, usage, description = commands[cmd]
1979 parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
1980 description=description,
1981 formatter=TitledHelpFormatter(),
1982 usage="%%prog %s %s" % (cmd, usage))
1983 parser.disable_interspersed_args()
1984 options, args = parser.parse_args(args=argv[2:])
1985
1986 if not _CheckArguments(cmd, args_def, args):
1987 return None, None, None
1988
1989 return func, options, args
1990
1991
1992 def _FormatUsage(binary, commands):
1993 """Generates a nice description of all commands.
1994
1995 @param binary: Script name
1996 @param commands: Dictionary containing command definitions
1997
1998 """
1999 # compute the max line length for cmd + usage
2000 mlen = min(60, max(map(len, commands)))
2001
2002 yield "Usage: %s {command} [options...] [argument...]" % binary
2003 yield "%s <command> --help to see details, or man %s" % (binary, binary)
2004 yield ""
2005 yield "Commands:"
2006
2007 # and format a nice command list
2008 for (cmd, (_, _, _, _, help_text)) in sorted(commands.items()):
2009 help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
2010 yield " %-*s - %s" % (mlen, cmd, help_lines.pop(0))
2011 for line in help_lines:
2012 yield " %-*s %s" % (mlen, "", line)
2013
2014 yield ""
2015
2016
2017 def _CheckArguments(cmd, args_def, args):
2018 """Verifies the arguments using the argument definition.
2019
2020 Algorithm:
2021
2022 1. Abort with error if values specified by user but none expected.
2023
2024 1. For each argument in definition
2025
2026 1. Keep running count of minimum number of values (min_count)
2027 1. Keep running count of maximum number of values (max_count)
2028 1. If it has an unlimited number of values
2029
2030 1. Abort with error if it's not the last argument in the definition
2031
2032 1. If last argument has limited number of values
2033
2034 1. Abort with error if number of values doesn't match or is too large
2035
2036 1. Abort with error if user didn't pass enough values (min_count)
2037
2038 """
2039 if args and not args_def:
2040 ToStderr("Error: Command %s expects no arguments", cmd)
2041 return False
2042
2043 min_count = None
2044 max_count = None
2045 check_max = None
2046
2047 last_idx = len(args_def) - 1
2048
2049 for idx, arg in enumerate(args_def):
2050 if min_count is None:
2051 min_count = arg.min
2052 elif arg.min is not None:
2053 min_count += arg.min
2054
2055 if max_count is None:
2056 max_count = arg.max
2057 elif arg.max is not None:
2058 max_count += arg.max
2059
2060 if idx == last_idx:
2061 check_max = (arg.max is not None)
2062
2063 elif arg.max is None:
2064 raise errors.ProgrammerError("Only the last argument can have max=None")
2065
2066 if check_max:
2067 # Command with exact number of arguments
2068 if (min_count is not None and max_count is not None and
2069 min_count == max_count and len(args) != min_count):
2070 ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
2071 return False
2072
2073 # Command with limited number of arguments
2074 if max_count is not None and len(args) > max_count:
2075 ToStderr("Error: Command %s expects only %d argument(s)",
2076 cmd, max_count)
2077 return False
2078
2079 # Command with some required arguments
2080 if min_count is not None and len(args) < min_count:
2081 ToStderr("Error: Command %s expects at least %d argument(s)",
2082 cmd, min_count)
2083 return False
2084
2085 return True
2086
2087
2088 def SplitNodeOption(value):
2089 """Splits the value of a --node option.
2090
2091 """
2092 if value and ":" in value:
2093 return value.split(":", 1)
2094 else:
2095 return (value, None)
2096
2097
2098 def CalculateOSNames(os_name, os_variants):
2099 """Calculates all the names an OS can be called, according to its variants.
2100
2101 @type os_name: string
2102 @param os_name: base name of the os
2103 @type os_variants: list or None
2104 @param os_variants: list of supported variants
2105 @rtype: list
2106 @return: list of valid names
2107
2108 """
2109 if os_variants:
2110 return ["%s+%s" % (os_name, v) for v in os_variants]
2111 else:
2112 return [os_name]
2113
2114
2115 def ParseFields(selected, default):
2116 """Parses the values of "--field"-like options.
2117
2118 @type selected: string or None
2119 @param selected: User-selected options
2120 @type default: list
2121 @param default: Default fields
2122
2123 """
2124 if selected is None:
2125 return default
2126
2127 if selected.startswith("+"):
2128 return default + selected[1:].split(",")
2129
2130 return selected.split(",")
2131
2132
2133 UsesRPC = rpc.RunWithRPC
2134
2135
2136 def AskUser(text, choices=None):
2137 """Ask the user a question.
2138
2139 @param text: the question to ask
2140
2141 @param choices: list with elements tuples (input_char, return_value,
2142 description); if not given, it will default to: [('y', True,
2143 'Perform the operation'), ('n', False, 'Do no do the operation')];
2144 note that the '?' char is reserved for help
2145
2146 @return: one of the return values from the choices list; if input is
2147 not possible (i.e. not running with a tty, we return the last
2148 entry from the list
2149
2150 """
2151 if choices is None:
2152 choices = [("y", True, "Perform the operation"),
2153 ("n", False, "Do not perform the operation")]
2154 if not choices or not isinstance(choices, list):
2155 raise errors.ProgrammerError("Invalid choices argument to AskUser")
2156 for entry in choices:
2157 if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
2158 raise errors.ProgrammerError("Invalid choices element to AskUser")
2159
2160 answer = choices[-1][1]
2161 new_text = []
2162 for line in text.splitlines():
2163 new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
2164 text = "\n".join(new_text)
2165 try:
2166 f = file("/dev/tty", "a+")
2167 except IOError:
2168 return answer
2169 try:
2170 chars = [entry[0] for entry in choices]
2171 chars[-1] = "[%s]" % chars[-1]
2172 chars.append("?")
2173 maps = dict([(entry[0], entry[1]) for entry in choices])
2174 while True:
2175 f.write(text)
2176 f.write("\n")
2177 f.write("/".join(chars))
2178 f.write(": ")
2179 line = f.readline(2).strip().lower()
2180 if line in maps:
2181 answer = maps[line]
2182 break
2183 elif line == "?":
2184 for entry in choices:
2185 f.write(" %s - %s\n" % (entry[0], entry[2]))
2186 f.write("\n")
2187 continue
2188 finally:
2189 f.close()
2190 return answer
2191
2192
2193 class JobSubmittedException(Exception):
2194 """Job was submitted, client should exit.
2195
2196 This exception has one argument, the ID of the job that was
2197 submitted. The handler should print this ID.
2198
2199 This is not an error, just a structured way to exit from clients.
2200
2201 """
2202
2203
2204 def SendJob(ops, cl=None):
2205 """Function to submit an opcode without waiting for the results.
2206
2207 @type ops: list
2208 @param ops: list of opcodes
2209 @type cl: luxi.Client
2210 @param cl: the luxi client to use for communicating with the master;
2211 if None, a new client will be created
2212
2213 """
2214 if cl is None:
2215 cl = GetClient()
2216
2217 job_id = cl.SubmitJob(ops)
2218
2219 return job_id
2220
2221
2222 def GenericPollJob(job_id, cbs, report_cbs):
2223 """Generic job-polling function.
2224
2225 @type job_id: number
2226 @param job_id: Job ID
2227 @type cbs: Instance of L{JobPollCbBase}
2228 @param cbs: Data callbacks
2229 @type report_cbs: Instance of L{JobPollReportCbBase}
2230 @param report_cbs: Reporting callbacks
2231
2232 """
2233 prev_job_info = None
2234 prev_logmsg_serial = None
2235
2236 status = None
2237
2238 while True:
2239 result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
2240 prev_logmsg_serial)
2241 if not result:
2242 # job not found, go away!
2243 raise errors.JobLost("Job with id %s lost" % job_id)
2244
2245 if result == constants.JOB_NOTCHANGED:
2246 report_cbs.ReportNotChanged(job_id, status)
2247
2248 # Wait again
2249 continue
2250
2251 # Split result, a tuple of (field values, log entries)
2252 (job_info, log_entries) = result
2253 (status, ) = job_info
2254
2255 if log_entries:
2256 for log_entry in log_entries:
2257 (serial, timestamp, log_type, message) = log_entry
2258 report_cbs.ReportLogMessage(job_id, serial, timestamp,
2259 log_type, message)
2260 prev_logmsg_serial = max(prev_logmsg_serial, serial)
2261
2262 # TODO: Handle canceled and archived jobs
2263 elif status in (constants.JOB_STATUS_SUCCESS,
2264 constants.JOB_STATUS_ERROR,
2265 constants.JOB_STATUS_CANCELING,
2266 constants.JOB_STATUS_CANCELED):
2267 break
2268
2269 prev_job_info = job_info
2270
2271 jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
2272 if not jobs:
2273 raise errors.JobLost("Job with id %s lost" % job_id)
2274
2275 status, opstatus, result = jobs[0]
2276
2277 if status == constants.JOB_STATUS_SUCCESS:
2278 return result
2279
2280 if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
2281 raise errors.OpExecError("Job was canceled")
2282
2283 has_ok = False
2284 for idx, (status, msg) in enumerate(zip(opstatus, result)):
2285 if status == constants.OP_STATUS_SUCCESS:
2286 has_ok = True
2287 elif status == constants.OP_STATUS_ERROR:
2288 errors.MaybeRaise(msg)
2289
2290 if has_ok:
2291 raise errors.OpExecError("partial failure (opcode %d): %s" %
2292 (idx, msg))
2293
2294 raise errors.OpExecError(str(msg))
2295
2296 # default failure mode
2297 raise errors.OpExecError(result)
2298
2299
2300 class JobPollCbBase(object):
2301 """Base class for L{GenericPollJob} callbacks.
2302
2303 """
2304 def __init__(self):
2305 """Initializes this class.
2306
2307 """
2308
2309 def WaitForJobChangeOnce(self, job_id, fields,
2310 prev_job_info, prev_log_serial):
2311 """Waits for changes on a job.
2312
2313 """
2314 raise NotImplementedError()
2315
2316 def QueryJobs(self, job_ids, fields):
2317 """Returns the selected fields for the selected job IDs.
2318
2319 @type job_ids: list of numbers
2320 @param job_ids: Job IDs
2321 @type fields: list of strings
2322 @param fields: Fields
2323
2324 """
2325 raise NotImplementedError()
2326
2327
2328 class JobPollReportCbBase(object):
2329 """Base class for L{GenericPollJob} reporting callbacks.
2330
2331 """
2332 def __init__(self):
2333 """Initializes this class.
2334
2335 """
2336
2337 def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2338 """Handles a log message.
2339
2340 """
2341 raise NotImplementedError()
2342
2343 def ReportNotChanged(self, job_id, status):
2344 """Called for if a job hasn't changed in a while.
2345
2346 @type job_id: number
2347 @param job_id: Job ID
2348 @type status: string or None
2349 @param status: Job status if available
2350
2351 """
2352 raise NotImplementedError()
2353
2354
2355 class _LuxiJobPollCb(JobPollCbBase):
2356 def __init__(self, cl):
2357 """Initializes this class.
2358
2359 """
2360 JobPollCbBase.__init__(self)
2361 self.cl = cl
2362
2363 def WaitForJobChangeOnce(self, job_id, fields,
2364 prev_job_info, prev_log_serial):
2365 """Waits for changes on a job.
2366
2367 """
2368 return self.cl.WaitForJobChangeOnce(job_id, fields,
2369 prev_job_info, prev_log_serial)
2370
2371 def QueryJobs(self, job_ids, fields):
2372 """Returns the selected fields for the selected job IDs.
2373
2374 """
2375 return self.cl.QueryJobs(job_ids, fields)
2376
2377
2378 class FeedbackFnJobPollReportCb(JobPollReportCbBase):
2379 def __init__(self, feedback_fn):
2380 """Initializes this class.
2381
2382 """
2383 JobPollReportCbBase.__init__(self)
2384
2385 self.feedback_fn = feedback_fn
2386
2387 assert callable(feedback_fn)
2388
2389 def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2390 """Handles a log message.
2391
2392 """
2393 self.feedback_fn((timestamp, log_type, log_msg))
2394
2395 def ReportNotChanged(self, job_id, status):
2396 """Called if a job hasn't changed in a while.
2397
2398 """
2399 # Ignore
2400
2401
2402 class StdioJobPollReportCb(JobPollReportCbBase):
2403 def __init__(self):
2404 """Initializes this class.
2405
2406 """
2407 JobPollReportCbBase.__init__(self)
2408
2409 self.notified_queued = False
2410 self.notified_waitlock = False
2411
2412 def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2413 """Handles a log message.
2414
2415 """
2416 ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
2417 FormatLogMessage(log_type, log_msg))
2418
2419 def ReportNotChanged(self, job_id, status):
2420 """Called if a job hasn't changed in a while.
2421
2422 """
2423 if status is None:
2424 return
2425
2426 if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
2427 ToStderr("Job %s is waiting in queue", job_id)
2428 self.notified_queued = True
2429
2430 elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
2431 ToStderr("Job %s is trying to acquire all necessary locks", job_id)
2432 self.notified_waitlock = True
2433
2434
2435 def FormatLogMessage(log_type, log_msg):
2436 """Formats a job message according to its type.
2437
2438 """
2439 if log_type != constants.ELOG_MESSAGE:
2440 log_msg = str(log_msg)
2441
2442 return utils.SafeEncode(log_msg)
2443
2444
2445 def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
2446 """Function to poll for the result of a job.
2447
2448 @type job_id: job identified
2449 @param job_id: the job to poll for results
2450 @type cl: luxi.Client
2451 @param cl: the luxi client to use for communicating with the master;
2452 if None, a new client will be created
2453
2454 """
2455 if cl is None:
2456 cl = GetClient()
2457
2458 if reporter is None:
2459 if feedback_fn:
2460 reporter = FeedbackFnJobPollReportCb(feedback_fn)
2461 else:
2462 reporter = StdioJobPollReportCb()
2463 elif feedback_fn:
2464 raise errors.ProgrammerError("Can't specify reporter and feedback function")
2465
2466 return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
2467
2468
2469 def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
2470 """Legacy function to submit an opcode.
2471
2472 This is just a simple wrapper over the construction of the processor
2473 instance. It should be extended to better handle feedback and
2474 interaction functions.
2475
2476 """
2477 if cl is None:
2478 cl = GetClient()
2479
2480 SetGenericOpcodeOpts([op], opts)
2481
2482 job_id = SendJob([op], cl=cl)
2483 if hasattr(opts, "print_jobid") and opts.print_jobid:
2484 ToStdout("%d" % job_id)
2485
2486 op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
2487 reporter=reporter)
2488
2489 return op_results[0]
2490
2491
2492 def SubmitOpCodeToDrainedQueue(op):
2493 """Forcefully insert a job in the queue, even if it is drained.
2494
2495 """
2496 cl = GetClient()
2497 job_id = cl.SubmitJobToDrainedQueue([op])
2498 op_results = PollJob(job_id, cl=cl)
2499 return op_results[0]
2500
2501
2502 def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
2503 """Wrapper around SubmitOpCode or SendJob.
2504
2505 This function will decide, based on the 'opts' parameter, whether to
2506 submit and wait for the result of the opcode (and return it), or
2507 whether to just send the job and print its identifier. It is used in
2508 order to simplify the implementation of the '--submit' option.
2509
2510 It will also process the opcodes if we're sending the via SendJob
2511 (otherwise SubmitOpCode does it).
2512
2513 """
2514 if opts and opts.submit_only:
2515 job = [op]
2516 SetGenericOpcodeOpts(job, opts)
2517 job_id = SendJob(job, cl=cl)
2518 if opts.print_jobid:
2519 ToStdout("%d" % job_id)
2520 raise JobSubmittedException(job_id)
2521 else:
2522 return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
2523
2524
2525 def _InitReasonTrail(op, opts):
2526 """Builds the first part of the reason trail
2527
2528 Builds the initial part of the reason trail, adding the user provided reason
2529 (if it exists) and the name of the command starting the operation.
2530
2531 @param op: the opcode the reason trail will be added to
2532 @param opts: the command line options selected by the user
2533
2534 """
2535 assert len(sys.argv) >= 2
2536 trail = []
2537
2538 if opts.reason:
2539 trail.append((constants.OPCODE_REASON_SRC_USER,
2540 opts.reason,
2541 utils.EpochNano()))
2542
2543 binary = os.path.basename(sys.argv[0])
2544 source = "%s:%s" % (constants.OPCODE_REASON_SRC_CLIENT, binary)
2545 command = sys.argv[1]
2546 trail.append((source, command, utils.EpochNano()))
2547 op.reason = trail
2548
2549
2550 def SetGenericOpcodeOpts(opcode_list, options):
2551 """Processor for generic options.
2552
2553 This function updates the given opcodes based on generic command
2554 line options (like debug, dry-run, etc.).
2555
2556 @param opcode_list: list of opcodes
2557 @param options: command line options or None
2558 @return: None (in-place modification)
2559
2560 """
2561 if not options:
2562 return
2563 for op in opcode_list:
2564 op.debug_level = options.debug
2565 if hasattr(options, "dry_run"):
2566 op.dry_run = options.dry_run
2567 if getattr(options, "priority", None) is not None:
2568 op.priority = options.priority
2569 _InitReasonTrail(op, options)
2570
2571
2572 def FormatError(err):
2573 """Return a formatted error message for a given error.
2574
2575 This function takes an exception instance and returns a tuple
2576 consisting of two values: first, the recommended exit code, and
2577 second, a string describing the error message (not
2578 newline-terminated).
2579
2580 """
2581 retcode = 1
2582 obuf = StringIO()
2583 msg = str(err)
2584 if isinstance(err, errors.ConfigurationError):
2585 txt = "Corrupt configuration file: %s" % msg
2586 logging.error(txt)
2587 obuf.write(txt + "\n")
2588 obuf.write("Aborting.")
2589 retcode = 2
2590 elif isinstance(err, errors.HooksAbort):
2591 obuf.write("Failure: hooks execution failed:\n")
2592 for node, script, out in err.args[0]:
2593 if out:
2594 obuf.write(" node: %s, script: %s, output: %s\n" %
2595 (node, script, out))
2596 else:
2597 obuf.write(" node: %s, script: %s (no output)\n" %
2598 (node, script))
2599 elif isinstance(err, errors.HooksFailure):
2600 obuf.write("Failure: hooks general failure: %s" % msg)
2601 elif isinstance(err, errors.ResolverError):
2602 this_host = netutils.Hostname.GetSysName()
2603 if err.args[0] == this_host:
2604 msg = "Failure: can't resolve my own hostname ('%s')"
2605 else:
2606 msg = "Failure: can't resolve hostname '%s'"
2607 obuf.write(msg % err.args[0])
2608 elif isinstance(err, errors.OpPrereqError):
2609 if len(err.args) == 2:
2610 obuf.write("Failure: prerequisites not met for this"
2611 " operation:\nerror type: %s, error details:\n%s" %
2612 (err.args[1], err.args[0]))
2613 else:
2614 obuf.write("Failure: prerequisites not met for this"
2615 " operation:\n%s" % msg)
2616 elif isinstance(err, errors.OpExecError):
2617 obuf.write("Failure: command execution error:\n%s" % msg)
2618 elif isinstance(err, errors.TagError):
2619 obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
2620 elif isinstance(err, errors.JobQueueDrainError):
2621 obuf.write("Failure: the job queue is marked for drain and doesn't"
2622 " accept new requests\n")
2623 elif isinstance(err, errors.JobQueueFull):
2624 obuf.write("Failure: the job queue is full and doesn't accept new"
2625 " job submissions until old jobs are archived\n")
2626 elif isinstance(err, errors.TypeEnforcementError):
2627 obuf.write("Parameter Error: %s" % msg)
2628 elif isinstance(err, errors.ParameterError):
2629 obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
2630 elif isinstance(err, rpcerr.NoMasterError):
2631 if err.args[0] == pathutils.MASTER_SOCKET:
2632 daemon = "the master daemon"
2633 elif err.args[0] == pathutils.QUERY_SOCKET:
2634 daemon = "the config daemon"
2635 else:
2636 daemon = "socket '%s'" % str(err.args[0])
2637 obuf.write("Cannot communicate with %s.\nIs the process running"
2638 " and listening for connections?" % daemon)
2639 elif isinstance(err, rpcerr.TimeoutError):
2640 obuf.write("Timeout while talking to the master daemon. Jobs might have"
2641 " been submitted and will continue to run even if the call"
2642 " timed out. Useful commands in this situation are \"gnt-job"
2643 " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
2644 obuf.write(msg)
2645 elif isinstance(err, rpcerr.PermissionError):
2646 obuf.write("It seems you don't have permissions to connect to the"
2647 " master daemon.\nPlease retry as a different user.")
2648 elif isinstance(err, rpcerr.ProtocolError):
2649 obuf.write("Unhandled protocol error while talking to the master daemon:\n"
2650 "%s" % msg)
2651 elif isinstance(err, errors.JobLost):
2652 obuf.write("Error checking job status: %s" % msg)
2653 elif isinstance(err, errors.QueryFilterParseError):
2654 obuf.write("Error while parsing query filter: %s\n" % err.args[0])
2655 obuf.write("\n".join(err.GetDetails()))
2656 elif isinstance(err, errors.GenericError):
2657 obuf.write("Unhandled Ganeti error: %s" % msg)
2658 elif isinstance(err, JobSubmittedException):
2659 obuf.write("JobID: %s\n" % err.args[0])
2660 retcode = 0
2661 else:
2662 obuf.write("Unhandled exception: %s" % msg)
2663 return retcode, obuf.getvalue().rstrip("\n")
2664
2665
2666 def GenericMain(commands, override=None, aliases=None,
2667 env_override=frozenset()):
2668 """Generic main function for all the gnt-* commands.
2669
2670 @param commands: a dictionary with a special structure, see the design doc
2671 for command line handling.
2672 @param override: if not None, we expect a dictionary with keys that will
2673 override command line options; this can be used to pass
2674 options from the scripts to generic functions
2675 @param aliases: dictionary with command aliases {'alias': 'target, ...}
2676 @param env_override: list of environment names which are allowed to submit
2677 default args for commands
2678
2679 """
2680 # save the program name and the entire command line for later logging
2681 if sys.argv:
2682 binary = os.path.basename(sys.argv[0])
2683 if not binary:
2684 binary = sys.argv[0]
2685
2686 if len(sys.argv) >= 2:
2687 logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
2688 else:
2689 logname = binary
2690
2691 cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
2692 else:
2693 binary = "<unknown program>"
2694 cmdline = "<unknown>"
2695
2696 if aliases is None:
2697 aliases = {}
2698
2699 try:
2700 (func, options, args) = _ParseArgs(binary, sys.argv, commands, aliases,
2701 env_override)
2702 except _ShowVersion:
2703 ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
2704 constants.RELEASE_VERSION)
2705 return constants.EXIT_SUCCESS
2706 except _ShowUsage, err:
2707 for line in _FormatUsage(binary, commands):
2708 ToStdout(line)
2709
2710 if err.exit_error:
2711 return constants.EXIT_FAILURE
2712 else:
2713 return constants.EXIT_SUCCESS
2714 except errors.ParameterError, err:
2715 result, err_msg = FormatError(err)
2716 ToStderr(err_msg)
2717 return 1
2718
2719 if func is None: # parse error
2720 return 1
2721
2722 if override is not None:
2723 for key, val in override.iteritems():
2724 setattr(options, key, val)
2725
2726 utils.SetupLogging(pathutils.LOG_COMMANDS, logname, debug=options.debug,
2727 stderr_logging=True)
2728
2729 logging.debug("Command line: %s", cmdline)
2730
2731 try:
2732 result = func(options, args)
2733 except (errors.GenericError, rpcerr.ProtocolError,
2734 JobSubmittedException), err:
2735 result, err_msg = FormatError(err)
2736 logging.exception("Error during command processing")
2737 ToStderr(err_msg)
2738 except KeyboardInterrupt:
2739 result = constants.EXIT_FAILURE
2740 ToStderr("Aborted. Note that if the operation created any jobs, they"
2741 " might have been submitted and"
2742 " will continue to run in the background.")
2743 except IOError, err:
2744 if err.errno == errno.EPIPE:
2745 # our terminal went away, we'll exit
2746 sys.exit(constants.EXIT_FAILURE)
2747 else:
2748 raise
2749
2750 return result
2751
2752
2753 def ParseNicOption(optvalue):
2754 """Parses the value of the --net option(s).
2755
2756 """
2757 try:
2758 nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
2759 except (TypeError, ValueError), err:
2760 raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err),
2761 errors.ECODE_INVAL)
2762
2763 nics = [{}] * nic_max
2764 for nidx, ndict in optvalue:
2765 nidx = int(nidx)
2766
2767 if not isinstance(ndict, dict):
2768 raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
2769 " got %s" % (nidx, ndict), errors.ECODE_INVAL)
2770
2771 utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
2772
2773 nics[nidx] = ndict
2774
2775 return nics
2776
2777
2778 def FixHvParams(hvparams):
2779 # In Ganeti 2.8.4 the separator for the usb_devices hvparam was changed from
2780 # comma to space because commas cannot be accepted on the command line
2781 # (they already act as the separator between different hvparams). Still,
2782 # RAPI should be able to accept commas for backwards compatibility.
2783 # Therefore, we convert spaces into commas here, and we keep the old
2784 # parsing logic everywhere else.
2785 try:
2786 new_usb_devices = hvparams[constants.HV_USB_DEVICES].replace(" ", ",")
2787 hvparams[constants.HV_USB_DEVICES] = new_usb_devices
2788 except KeyError:
2789 #No usb_devices, no modification required
2790 pass
2791
2792
2793 def GenericInstanceCreate(mode, opts, args):
2794 """Add an instance to the cluster via either creation or import.
2795
2796 @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
2797 @param opts: the command line options selected by the user
2798 @type args: list
2799 @param args: should contain only one element, the new instance name
2800 @rtype: int
2801 @return: the desired exit code
2802
2803 """
2804 instance = args[0]
2805
2806 (pnode, snode) = SplitNodeOption(opts.node)
2807
2808 hypervisor = None
2809 hvparams = {}
2810 if opts.hypervisor:
2811 hypervisor, hvparams = opts.hypervisor
2812
2813 if opts.nics:
2814 nics = ParseNicOption(opts.nics)
2815 elif opts.no_nics:
2816 # no nics
2817 nics = []
2818 elif mode == constants.INSTANCE_CREATE:
2819 # default of one nic, all auto
2820 nics = [{}]
2821 else:
2822 # mode == import
2823 nics = []
2824
2825 if opts.disk_template == constants.DT_DISKLESS:
2826 if opts.disks or opts.sd_size is not None:
2827 raise errors.OpPrereqError("Diskless instance but disk"
2828 " information passed", errors.ECODE_INVAL)
2829 disks = []
2830 else:
2831 if (not opts.disks and not opts.sd_size
2832 and mode == constants.INSTANCE_CREATE):
2833 raise errors.OpPrereqError("No disk information specified",
2834 errors.ECODE_INVAL)
2835 if opts.disks and opts.sd_size is not None:
2836 raise errors.OpPrereqError("Please use either the '--disk' or"
2837 " '-s' option", errors.ECODE_INVAL)
2838 if opts.sd_size is not None:
2839 opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]
2840
2841 if opts.disks:
2842 try:
2843 disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
2844 except ValueError, err:
2845 raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err),
2846 errors.ECODE_INVAL)
2847 disks = [{}] * disk_max
2848 else:
2849 disks = []
2850 for didx, ddict in opts.disks:
2851 didx = int(didx)
2852 if not isinstance(ddict, dict):
2853 msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
2854 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
2855 elif constants.IDISK_SIZE in ddict:
2856 if constants.IDISK_ADOPT in ddict:
2857 raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
2858 " (disk %d)" % didx, errors.ECODE_INVAL)
2859 try:
2860 ddict[constants.IDISK_SIZE] = \
2861 utils.ParseUnit(ddict[constants.IDISK_SIZE])
2862 except ValueError, err:
2863 raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
2864 (didx, err), errors.ECODE_INVAL)
2865 elif constants.IDISK_ADOPT in ddict:
2866 if constants.IDISK_SPINDLES in ddict:
2867 raise errors.OpPrereqError("spindles is not a valid option when"
2868 " adopting a disk", errors.ECODE_INVAL)
2869 if mode == constants.INSTANCE_IMPORT:
2870 raise errors.OpPrereqError("Disk adoption not allowed for instance"
2871 " import", errors.ECODE_INVAL)
2872 ddict[constants.IDISK_SIZE] = 0
2873 else:
2874 raise errors.OpPrereqError("Missing size or adoption source for"
2875 " disk %d" % didx, errors.ECODE_INVAL)
2876 if constants.IDISK_SPINDLES in ddict:
2877 ddict[constants.IDISK_SPINDLES] = int(ddict[constants.IDISK_SPINDLES])
2878
2879 disks[didx] = ddict
2880
2881 if opts.tags is not None:
2882 tags = opts.tags.split(",")
2883 else:
2884 tags = []
2885
2886 utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
2887 utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
2888 FixHvParams(hvparams)
2889
2890 osparams_private = opts.osparams_private or serializer.PrivateDict()
2891 osparams_secret = opts.osparams_secret or serializer.PrivateDict()
2892
2893 helper_startup_timeout = opts.helper_startup_timeout
2894 helper_shutdown_timeout = opts.helper_shutdown_timeout
2895
2896 if mode == constants.INSTANCE_CREATE:
2897 start = opts.start
2898 os_type = opts.os
2899 force_variant = opts.force_variant
2900 src_node = None
2901 src_path = None
2902 no_install = opts.no_install
2903 identify_defaults = False
2904 compress = constants.IEC_NONE
2905 if opts.instance_communication is None:
2906 instance_communication = False
2907 else:
2908 instance_communication = opts.instance_communication
2909 elif mode == constants.INSTANCE_IMPORT:
2910 start = False
2911 os_type = None
2912 force_variant = False
2913 src_node = opts.src_node
2914 src_path = opts.src_dir
2915 no_install = None
2916 identify_defaults = opts.identify_defaults
2917 compress = opts.compress
2918 instance_communication = False
2919 else:
2920 raise errors.ProgrammerError("Invalid creation mode %s" % mode)
2921
2922 op = opcodes.OpInstanceCreate(
2923 instance_name=instance,
2924 disks=disks,
2925 disk_template=opts.disk_template,
2926 group_name=opts.nodegroup,
2927 nics=nics,
2928 conflicts_check=opts.conflicts_check,
2929 pnode=pnode, snode=snode,
2930 ip_check=opts.ip_check,
2931 name_check=opts.name_check,
2932 wait_for_sync=opts.wait_for_sync,
2933 file_storage_dir=opts.file_storage_dir,
2934 file_driver=opts.file_driver,
2935 iallocator=opts.iallocator,
2936 hypervisor=hypervisor,
2937 hvparams=hvparams,
2938 beparams=opts.beparams,
2939 osparams=opts.osparams,
2940 osparams_private=osparams_private,
2941 osparams_secret=osparams_secret,
2942 mode=mode,
2943 opportunistic_locking=opts.opportunistic_locking,
2944 start=start,
2945 os_type=os_type,
2946 force_variant=force_variant,
2947 src_node=src_node,
2948 src_path=src_path,
2949 compress=compress,
2950 tags=tags,
2951 no_install=no_install,
2952 identify_defaults=identify_defaults,
2953 ignore_ipolicy=opts.ignore_ipolicy,
2954 instance_communication=instance_communication,
2955 helper_startup_timeout=helper_startup_timeout,
2956 helper_shutdown_timeout=helper_shutdown_timeout)
2957
2958 SubmitOrSend(op, opts)
2959 return 0
2960
2961
2962 class _RunWhileClusterStoppedHelper(object):
2963 """Helper class for L{RunWhileClusterStopped} to simplify state management
2964
2965 """
2966 def __init__(self, feedback_fn, cluster_name, master_node,
2967 online_nodes, ssh_ports):
2968 """Initializes this class.
2969
2970 @type feedback_fn: callable
2971 @param feedback_fn: Feedback function
2972 @type cluster_name: string
2973 @param cluster_name: Cluster name
2974 @type master_node: string
2975 @param master_node Master node name
2976 @type online_nodes: list
2977 @param online_nodes: List of names of online nodes
2978 @type ssh_ports: list
2979 @param ssh_ports: List of SSH ports of online nodes
2980
2981 """
2982 self.feedback_fn = feedback_fn
2983 self.cluster_name = cluster_name
2984 self.master_node = master_node
2985 self.online_nodes = online_nodes
2986 self.ssh_ports = dict(zip(online_nodes, ssh_ports))
2987
2988 self.ssh = ssh.SshRunner(self.cluster_name)
2989
2990 self.nonmaster_nodes = [name for name in online_nodes
2991 if name != master_node]
2992
2993 assert self.master_node not in self.nonmaster_nodes
2994
2995 def _RunCmd(self, node_name, cmd):
2996 """Runs a command on the local or a remote machine.
2997
2998 @type node_name: string
2999 @param node_name: Machine name
3000 @type cmd: list
3001 @param cmd: Command
3002
3003 """
3004 if node_name is None or node_name == self.master_node:
3005 # No need to use SSH
3006 result = utils.RunCmd(cmd)
3007 else:
3008 result = self.ssh.Run(node_name, constants.SSH_LOGIN_USER,
3009 utils.ShellQuoteArgs(cmd),
3010 port=self.ssh_ports[node_name])
3011
3012 if result.failed:
3013 errmsg = ["Failed to run command %s" % result.cmd]
3014 if node_name:
3015 errmsg.append("on node %s" % node_name)
3016 errmsg.append(": exitcode %s and error %s" %
3017 (result.exit_code, result.output))
3018 raise errors.OpExecError(" ".join(errmsg))
3019
3020 def Call(self, fn, *args):
3021 """Call function while all daemons are stopped.
3022
3023 @type fn: callable
3024 @param fn: Function to be called
3025
3026 """
3027 # Pause watcher by acquiring an exclusive lock on watcher state file
3028 self.feedback_fn("Blocking watcher")
3029 watcher_block = utils.FileLock.Open(pathutils.WATCHER_LOCK_FILE)
3030 try:
3031 # TODO: Currently, this just blocks. There's no timeout.
3032 # TODO: Should it be a shared lock?
3033 watcher_block.Exclusive(blocking=True)
3034
3035 # Stop master daemons, so that no new jobs can come in and all running
3036 # ones are finished
3037 self.feedback_fn("Stopping master daemons")
3038 self._RunCmd(None, [pathutils.DAEMON_UTIL, "stop-master"])
3039 try:
3040 # Stop daemons on all nodes
3041 for node_name in self.online_nodes:
3042 self.feedback_fn("Stopping daemons on %s" % node_name)
3043 self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop-all"])
3044
3045 # All daemons are shut down now
3046 try:
3047 return fn(self, *args)
3048 except Exception, err:
3049 _, errmsg = FormatError(err)
3050 logging.exception("Caught exception")
3051 self.feedback_fn(errmsg)
3052 raise
3053 finally:
3054 # Start cluster again, master node last
3055 for node_name in self.nonmaster_nodes + [self.master_node]:
3056 self.feedback_fn("Starting daemons on %s" % node_name)
3057 self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start-all"])
3058 finally:
3059 # Resume watcher
3060 watcher_block.Close()
3061
3062
3063 def RunWhileClusterStopped(feedback_fn, fn, *args):
3064 """Calls a function while all cluster daemons are stopped.
3065
3066 @type feedback_fn: callable
3067 @param feedback_fn: Feedback function
3068 @type fn: callable
3069 @param fn: Function to be called when daemons are stopped
3070
3071 """
3072 feedback_fn("Gathering cluster information")
3073
3074 # This ensures we're running on the master daemon
3075 cl = GetClient()
3076
3077 (cluster_name, master_node) = \
3078 cl.QueryConfigValues(["cluster_name", "master_node"])
3079
3080 online_nodes = GetOnlineNodes([], cl=cl)
3081 ssh_ports = GetNodesSshPorts(online_nodes, cl)
3082
3083 # Don't keep a reference to the client. The master daemon will go away.
3084 del cl
3085
3086 assert master_node in online_nodes
3087
3088 return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
3089 online_nodes, ssh_ports).Call(fn, *args)
3090
3091
3092 def GenerateTable(headers, fields, separator, data,
3093 numfields=None, unitfields=None,
3094 units=None):
3095 """Prints a table with headers and different fields.
3096
3097 @type headers: dict
3098 @param headers: dictionary mapping field names to headers for
3099 the table
3100 @type fields: list
3101 @param fields: the field names corresponding to each row in
3102 the data field
3103 @param separator: the separator to be used; if this is None,
3104 the default 'smart' algorithm is used which computes optimal
3105 field width, otherwise just the separator is used between
3106 each field
3107 @type data: list
3108 @param data: a list of lists, each sublist being one row to be output
3109 @type numfields: list
3110 @param numfields: a list with the fields that hold numeric
3111 values and thus should be right-aligned
3112 @type unitfields: list
3113 @param unitfields: a list with the fields that hold numeric
3114 values that should be formatted with the units field
3115 @type units: string or None
3116 @param units: the units we should use for formatting, or None for
3117 automatic choice (human-readable for non-separator usage, otherwise
3118 megabytes); this is a one-letter string
3119
3120 """
3121 if units is None:
3122 if separator:
3123 units = "m"
3124 else:
3125 units = "h"
3126
3127 if numfields is None:
3128 numfields = []
3129 if unitfields is None:
3130 unitfields = []
3131
3132 numfields = utils.FieldSet(*numfields) # pylint: disable=W0142
3133 unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142
3134
3135 format_fields = []
3136 for field in fields:
3137 if headers and field not in headers:
3138 # TODO: handle better unknown fields (either revert to old
3139 # style of raising exception, or deal more intelligently with
3140 # variable fields)
3141 headers[field] = field
3142 if separator is not None:
3143 format_fields.append("%s")
3144 elif numfields.Matches(field):
3145 format_fields.append("%*s")
3146 else:
3147 format_fields.append("%-*s")
3148
3149 if separator is None:
3150 mlens = [0 for name in fields]
3151 format_str = " ".join(format_fields)
3152 else:
3153 format_str = separator.replace("%", "%%").join(format_fields)
3154
3155 for row in data:
3156 if row is None:
3157 continue
3158 for idx, val in enumerate(row):
3159 if unitfields.Matches(fields[idx]):
3160 try:
3161 val = int(val)
3162 except (TypeError, ValueError):
3163 pass
3164 else:
3165 val = row[idx] = utils.FormatUnit(val, units)
3166 val = row[idx] = str(val)
3167 if separator is None:
3168 mlens[idx] = max(mlens[idx], len(val))
3169
3170 result = []
3171 if headers:
3172 args = []
3173 for idx, name in enumerate(fields):
3174 hdr = headers[name]
3175 if separator is None:
3176 mlens[idx] = max(mlens[idx], len(hdr))
3177 args.append(mlens[idx])
3178 args.append(hdr)
3179 result.append(format_str % tuple(args))
3180
3181 if separator is None:
3182 assert len(mlens) == len(fields)
3183
3184 if fields and not numfields.Matches(fields[-1]):
3185 mlens[-1] = 0
3186
3187 for line in data:
3188 args = []
3189 if line is None:
3190 line = ["-" for _ in fields]
3191 for idx in range(len(fields)):
3192 if separator is None:
3193 args.append(mlens[idx])
3194 args.append(line[idx])
3195 result.append(format_str % tuple(args))
3196
3197 return result
3198
3199
3200 def _FormatBool(value):
3201 """Formats a boolean value as a string.
3202
3203 """
3204 if value:
3205 return "Y"
3206 return "N"
3207
3208
3209 #: Default formatting for query results; (callback, align right)
3210 _DEFAULT_FORMAT_QUERY = {
3211 constants.QFT_TEXT: (str, False),
3212 constants.QFT_BOOL: (_FormatBool, False),
3213 constants.QFT_NUMBER: (str, True),
3214 constants.QFT_NUMBER_FLOAT: (str, True),
3215 constants.QFT_TIMESTAMP: (utils.FormatTime, False),
3216 constants.QFT_OTHER: (str, False),
3217 constants.QFT_UNKNOWN: (str, False),
3218 }
3219