1 #
2 #
3
4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc.
5 # All rights reserved.
6 #
7 # Redistribution and use in source and binary forms, with or without
8 # modification, are permitted provided that the following conditions are
9 # met:
10 #
11 # 1. Redistributions of source code must retain the above copyright notice,
12 # this list of conditions and the following disclaimer.
13 #
14 # 2. Redistributions in binary form must reproduce the above copyright
15 # notice, this list of conditions and the following disclaimer in the
16 # documentation and/or other materials provided with the distribution.
17 #
18 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
19 # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
20 # TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
21 # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
22 # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
23 # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
24 # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
25 # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
26 # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
27 # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28 # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
30
31 """Module dealing with command line parsing"""
32
33
34 import sys
35 import textwrap
36 import os.path
37 import time
38 import logging
39 import errno
40 import itertools
41 import shlex
42 from cStringIO import StringIO
43
44 from ganeti import utils
45 from ganeti import errors
46 from ganeti import constants
47 from ganeti import opcodes
48 import ganeti.rpc.errors as rpcerr
49 import ganeti.rpc.node as rpc
50 from ganeti import ssh
51 from ganeti import compat
52 from ganeti import netutils
53 from ganeti import qlang
54 from ganeti import objects
55 from ganeti import pathutils
56 from ganeti import serializer
57
58 from ganeti.runtime import (GetClient)
59
60 from optparse import (OptionParser, TitledHelpFormatter,
61 Option, OptionValueError)
62
63
64 __all__ = [
65 # Command line options
66 "ABSOLUTE_OPT",
67 "ADD_UIDS_OPT",
68 "ADD_RESERVED_IPS_OPT",
69 "ALLOCATABLE_OPT",
70 "ALLOC_POLICY_OPT",
71 "ALL_OPT",
72 "ALLOW_FAILOVER_OPT",
73 "AUTO_PROMOTE_OPT",
74 "AUTO_REPLACE_OPT",
75 "BACKEND_OPT",
76 "BLK_OS_OPT",
77 "CAPAB_MASTER_OPT",
78 "CAPAB_VM_OPT",
79 "CLEANUP_OPT",
80 "CLUSTER_DOMAIN_SECRET_OPT",
81 "CONFIRM_OPT",
82 "CP_SIZE_OPT",
83 "COMPRESSION_TOOLS_OPT",
84 "DEBUG_OPT",
85 "DEBUG_SIMERR_OPT",
86 "DISKIDX_OPT",
87 "DISK_OPT",
88 "DISK_PARAMS_OPT",
89 "DISK_TEMPLATE_OPT",
90 "DRAINED_OPT",
91 "DRY_RUN_OPT",
92 "DRBD_HELPER_OPT",
93 "DST_NODE_OPT",
94 "EARLY_RELEASE_OPT",
95 "ENABLED_HV_OPT",
96 "ENABLED_DISK_TEMPLATES_OPT",
97 "ENABLED_USER_SHUTDOWN_OPT",
98 "ERROR_CODES_OPT",
99 "FAILURE_ONLY_OPT",
100 "FIELDS_OPT",
101 "FILESTORE_DIR_OPT",
102 "FILESTORE_DRIVER_OPT",
103 "FORCE_FAILOVER_OPT",
104 "FORCE_FILTER_OPT",
105 "FORCE_OPT",
106 "FORCE_VARIANT_OPT",
107 "GATEWAY_OPT",
108 "GATEWAY6_OPT",
109 "GLOBAL_FILEDIR_OPT",
110 "HID_OS_OPT",
111 "GLOBAL_GLUSTER_FILEDIR_OPT",
112 "GLOBAL_SHARED_FILEDIR_OPT",
113 "HOTPLUG_OPT",
114 "HOTPLUG_IF_POSSIBLE_OPT",
115 "HVLIST_OPT",
116 "HVOPTS_OPT",
117 "HYPERVISOR_OPT",
118 "IALLOCATOR_OPT",
119 "DEFAULT_IALLOCATOR_OPT",
120 "DEFAULT_IALLOCATOR_PARAMS_OPT",
121 "IDENTIFY_DEFAULTS_OPT",
122 "IGNORE_CONSIST_OPT",
123 "IGNORE_ERRORS_OPT",
124 "IGNORE_FAILURES_OPT",
125 "IGNORE_OFFLINE_OPT",
126 "IGNORE_REMOVE_FAILURES_OPT",
127 "IGNORE_SECONDARIES_OPT",
128 "IGNORE_SIZE_OPT",
129 "INCLUDEDEFAULTS_OPT",
130 "INTERVAL_OPT",
131 "INSTALL_IMAGE_OPT",
132 "INSTANCE_COMMUNICATION_OPT",
133 "INSTANCE_COMMUNICATION_NETWORK_OPT",
134 "MAC_PREFIX_OPT",
135 "MAINTAIN_NODE_HEALTH_OPT",
136 "MASTER_NETDEV_OPT",
137 "MASTER_NETMASK_OPT",
138 "MAX_TRACK_OPT",
139 "MC_OPT",
140 "MIGRATION_MODE_OPT",
141 "MODIFY_ETCHOSTS_OPT",
142 "NET_OPT",
143 "NETWORK_OPT",
144 "NETWORK6_OPT",
145 "NEW_CLUSTER_CERT_OPT",
146 "NEW_NODE_CERT_OPT",
147 "NEW_CLUSTER_DOMAIN_SECRET_OPT",
148 "NEW_CONFD_HMAC_KEY_OPT",
149 "NEW_RAPI_CERT_OPT",
150 "NEW_PRIMARY_OPT",
151 "NEW_SECONDARY_OPT",
152 "NEW_SPICE_CERT_OPT",
153 "NIC_PARAMS_OPT",
154 "NOCONFLICTSCHECK_OPT",
155 "NODE_FORCE_JOIN_OPT",
156 "NODE_LIST_OPT",
157 "NODE_PLACEMENT_OPT",
158 "NODEGROUP_OPT",
159 "NODE_PARAMS_OPT",
160 "NODE_POWERED_OPT",
161 "NOHDR_OPT",
162 "NOIPCHECK_OPT",
163 "NO_INSTALL_OPT",
164 "NONAMECHECK_OPT",
165 "NOMODIFY_ETCHOSTS_OPT",
166 "NOMODIFY_SSH_SETUP_OPT",
167 "NONICS_OPT",
168 "NONLIVE_OPT",
169 "NONPLUS1_OPT",
170 "NORUNTIME_CHGS_OPT",
171 "NOSHUTDOWN_OPT",
172 "NOSTART_OPT",
173 "NOSSH_KEYCHECK_OPT",
174 "NOVOTING_OPT",
175 "NO_REMEMBER_OPT",
176 "NWSYNC_OPT",
177 "OFFLINE_INST_OPT",
178 "ONLINE_INST_OPT",
179 "ON_PRIMARY_OPT",
180 "ON_SECONDARY_OPT",
181 "OFFLINE_OPT",
182 "OS_OPT",
183 "OSPARAMS_OPT",
184 "OSPARAMS_PRIVATE_OPT",
185 "OSPARAMS_SECRET_OPT",
186 "OS_SIZE_OPT",
187 "OOB_TIMEOUT_OPT",
188 "POWER_DELAY_OPT",
189 "PREALLOC_WIPE_DISKS_OPT",
190 "PRIMARY_IP_VERSION_OPT",
191 "PRIMARY_ONLY_OPT",
192 "PRINT_JOBID_OPT",
193 "PRIORITY_OPT",
194 "RAPI_CERT_OPT",
195 "READD_OPT",
196 "REASON_OPT",
197 "REBOOT_TYPE_OPT",
198 "REMOVE_INSTANCE_OPT",
199 "REMOVE_RESERVED_IPS_OPT",
200 "REMOVE_UIDS_OPT",
201 "RESERVED_LVS_OPT",
202 "RQL_OPT",
203 "RUNTIME_MEM_OPT",
204 "ROMAN_OPT",
205 "SECONDARY_IP_OPT",
206 "SECONDARY_ONLY_OPT",
207 "SELECT_OS_OPT",
208 "SEP_OPT",
209 "SHOWCMD_OPT",
210 "SHOW_MACHINE_OPT",
211 "COMPRESS_OPT",
212 "TRANSPORT_COMPRESSION_OPT",
213 "SHUTDOWN_TIMEOUT_OPT",
214 "SINGLE_NODE_OPT",
215 "SPECS_CPU_COUNT_OPT",
216 "SPECS_DISK_COUNT_OPT",
217 "SPECS_DISK_SIZE_OPT",
218 "SPECS_MEM_SIZE_OPT",
219 "SPECS_NIC_COUNT_OPT",
220 "SPLIT_ISPECS_OPTS",
221 "IPOLICY_STD_SPECS_OPT",
222 "IPOLICY_DISK_TEMPLATES",
223 "IPOLICY_VCPU_RATIO",
224 "IPOLICY_SPINDLE_RATIO",
225 "SEQUENTIAL_OPT",
226 "SPICE_CACERT_OPT",
227 "SPICE_CERT_OPT",
228 "SRC_DIR_OPT",
229 "SRC_NODE_OPT",
230 "SUBMIT_OPT",
231 "SUBMIT_OPTS",
232 "STARTUP_PAUSED_OPT",
233 "STATIC_OPT",
234 "SYNC_OPT",
235 "TAG_ADD_OPT",
236 "TAG_SRC_OPT",
237 "TIMEOUT_OPT",
238 "TO_GROUP_OPT",
239 "UIDPOOL_OPT",
240 "USEUNITS_OPT",
241 "USE_EXTERNAL_MIP_SCRIPT",
242 "USE_REPL_NET_OPT",
243 "VERBOSE_OPT",
244 "VG_NAME_OPT",
245 "WFSYNC_OPT",
246 "YES_DOIT_OPT",
247 "ZEROING_IMAGE_OPT",
248 "ZERO_FREE_SPACE_OPT",
249 "HELPER_STARTUP_TIMEOUT_OPT",
250 "HELPER_SHUTDOWN_TIMEOUT_OPT",
251 "ZEROING_TIMEOUT_FIXED_OPT",
252 "ZEROING_TIMEOUT_PER_MIB_OPT",
253 "DISK_STATE_OPT",
254 "HV_STATE_OPT",
255 "IGNORE_IPOLICY_OPT",
256 "INSTANCE_POLICY_OPTS",
257 # Generic functions for CLI programs
258 "ConfirmOperation",
259 "CreateIPolicyFromOpts",
260 "GenericMain",
261 "GenericInstanceCreate",
262 "GenericList",
263 "GenericListFields",
264 "GetClient",
265 "GetOnlineNodes",
266 "GetNodesSshPorts",
267 "JobExecutor",
268 "JobSubmittedException",
269 "ParseTimespec",
270 "RunWhileClusterStopped",
271 "RunWhileDaemonsStopped",
272 "SubmitOpCode",
273 "SubmitOpCodeToDrainedQueue",
274 "SubmitOrSend",
275 "UsesRPC",
276 # Formatting functions
277 "ToStderr", "ToStdout",
278 "ToStdoutAndLoginfo",
279 "FormatError",
280 "FormatQueryResult",
281 "FormatParamsDictInfo",
282 "FormatPolicyInfo",
283 "PrintIPolicyCommand",
284 "PrintGenericInfo",
285 "GenerateTable",
286 "AskUser",
287 "FormatTimestamp",
288 "FormatLogMessage",
289 # Tags functions
290 "ListTags",
291 "AddTags",
292 "RemoveTags",
293 # command line options support infrastructure
294 "ARGS_MANY_INSTANCES",
295 "ARGS_MANY_NODES",
296 "ARGS_MANY_GROUPS",
297 "ARGS_MANY_NETWORKS",
298 "ARGS_NONE",
299 "ARGS_ONE_INSTANCE",
300 "ARGS_ONE_NODE",
301 "ARGS_ONE_GROUP",
302 "ARGS_ONE_OS",
303 "ARGS_ONE_NETWORK",
304 "ArgChoice",
305 "ArgCommand",
306 "ArgFile",
307 "ArgGroup",
308 "ArgHost",
309 "ArgInstance",
310 "ArgJobId",
311 "ArgNetwork",
312 "ArgNode",
313 "ArgOs",
314 "ArgExtStorage",
315 "ArgSuggest",
316 "ArgUnknown",
317 "OPT_COMPL_INST_ADD_NODES",
318 "OPT_COMPL_MANY_NODES",
319 "OPT_COMPL_ONE_IALLOCATOR",
320 "OPT_COMPL_ONE_INSTANCE",
321 "OPT_COMPL_ONE_NODE",
322 "OPT_COMPL_ONE_NODEGROUP",
323 "OPT_COMPL_ONE_NETWORK",
324 "OPT_COMPL_ONE_OS",
325 "OPT_COMPL_ONE_EXTSTORAGE",
326 "cli_option",
327 "FixHvParams",
328 "SplitNodeOption",
329 "CalculateOSNames",
330 "ParseFields",
331 "COMMON_CREATE_OPTS",
332 ]
333
334 NO_PREFIX = "no_"
335 UN_PREFIX = "-"
336
337 #: Priorities (sorted)
338 _PRIORITY_NAMES = [
339 ("low", constants.OP_PRIO_LOW),
340 ("normal", constants.OP_PRIO_NORMAL),
341 ("high", constants.OP_PRIO_HIGH),
342 ]
343
344 #: Priority dictionary for easier lookup
345 # TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
346 # we migrate to Python 2.6
347 _PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)
348
349 # Query result status for clients
350 (QR_NORMAL,
351 QR_UNKNOWN,
352 QR_INCOMPLETE) = range(3)
353
354 #: Maximum batch size for ChooseJob
355 _CHOOSE_BATCH = 25
356
357
358 # constants used to create InstancePolicy dictionary
359 TISPECS_GROUP_TYPES = {
360 constants.ISPECS_MIN: constants.VTYPE_INT,
361 constants.ISPECS_MAX: constants.VTYPE_INT,
362 }
363
364 TISPECS_CLUSTER_TYPES = {
365 constants.ISPECS_MIN: constants.VTYPE_INT,
366 constants.ISPECS_MAX: constants.VTYPE_INT,
367 constants.ISPECS_STD: constants.VTYPE_INT,
368 }
369
370 #: User-friendly names for query2 field types
371 _QFT_NAMES = {
372 constants.QFT_UNKNOWN: "Unknown",
373 constants.QFT_TEXT: "Text",
374 constants.QFT_BOOL: "Boolean",
375 constants.QFT_NUMBER: "Number",
376 constants.QFT_NUMBER_FLOAT: "Floating-point number",
377 constants.QFT_UNIT: "Storage size",
378 constants.QFT_TIMESTAMP: "Timestamp",
379 constants.QFT_OTHER: "Custom",
380 }
381
382
383 class _Argument(object):
384 def __init__(self, min=0, max=None): # pylint: disable=W0622
385 self.min = min
386 self.max = max
387
388 def __repr__(self):
389 return ("<%s min=%s max=%s>" %
390 (self.__class__.__name__, self.min, self.max))
391
392
393 class ArgSuggest(_Argument):
394 """Suggesting argument.
395
396   The choices passed to the constructor are offered as suggestions; any value is accepted.
397
398 """
399 # pylint: disable=W0622
400 def __init__(self, min=0, max=None, choices=None):
401 _Argument.__init__(self, min=min, max=max)
402 self.choices = choices
403
404 def __repr__(self):
405 return ("<%s min=%s max=%s choices=%r>" %
406 (self.__class__.__name__, self.min, self.max, self.choices))
407
408
409 class ArgChoice(ArgSuggest):
410 """Choice argument.
411
412   Like L{ArgSuggest}, but the value must be one of the choices passed to the
413   constructor.
414
415 """
416
417
418 class ArgUnknown(_Argument):
419 """Unknown argument to program (e.g. determined at runtime).
420
421 """
422
423
424 class ArgInstance(_Argument):
425 """Instances argument.
426
427 """
428
429
430 class ArgNode(_Argument):
431 """Node argument.
432
433 """
434
435
436 class ArgNetwork(_Argument):
437 """Network argument.
438
439 """
440
441
442 class ArgGroup(_Argument):
443 """Node group argument.
444
445 """
446
447
448 class ArgJobId(_Argument):
449 """Job ID argument.
450
451 """
452
453
454 class ArgFile(_Argument):
455 """File path argument.
456
457 """
458
459
460 class ArgCommand(_Argument):
461 """Command argument.
462
463 """
464
465
466 class ArgHost(_Argument):
467 """Host argument.
468
469 """
470
471
472 class ArgOs(_Argument):
473 """OS argument.
474
475 """
476
477
478 class ArgExtStorage(_Argument):
479 """ExtStorage argument.
480
481 """
482
483
484 ARGS_NONE = []
485 ARGS_MANY_INSTANCES = [ArgInstance()]
486 ARGS_MANY_NETWORKS = [ArgNetwork()]
487 ARGS_MANY_NODES = [ArgNode()]
488 ARGS_MANY_GROUPS = [ArgGroup()]
489 ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
490 ARGS_ONE_NETWORK = [ArgNetwork(min=1, max=1)]
491 ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
492 # TODO
493 ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
494 ARGS_ONE_OS = [ArgOs(min=1, max=1)]
495
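# Note (added for clarity, not part of the original module): these ARGS_*
# specifications only bound the number of positional arguments a command
# accepts; for example ARGS_ONE_INSTANCE requires exactly one instance name,
# while ARGS_MANY_NODES accepts any number of node names, including none.
# Client scripts pair such a spec with a handler function in their command
# tables.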
496
497 def _ExtractTagsObject(opts, args):
498 """Extract the tag type object.
499
500 Note that this function will modify its args parameter.
501
502 """
503 if not hasattr(opts, "tag_type"):
504 raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
505 kind = opts.tag_type
506 if kind == constants.TAG_CLUSTER:
507 retval = kind, ""
508 elif kind in (constants.TAG_NODEGROUP,
509 constants.TAG_NODE,
510 constants.TAG_NETWORK,
511 constants.TAG_INSTANCE):
512 if not args:
513 raise errors.OpPrereqError("no arguments passed to the command",
514 errors.ECODE_INVAL)
515 name = args.pop(0)
516 retval = kind, name
517 else:
518 raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
519 return retval
520
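# Minimal sketch of the behaviour above (illustration only): with
# opts.tag_type == constants.TAG_NODE and args == ["node1", "web"],
# _ExtractTagsObject returns (constants.TAG_NODE, "node1") and leaves ["web"]
# in args; for constants.TAG_CLUSTER the name is simply the empty string.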
521
522 def _ExtendTags(opts, args):
523 """Extend the args if a source file has been given.
524
525 This function will extend the tags with the contents of the file
526 passed in the 'tags_source' attribute of the opts parameter. A file
527 named '-' will be replaced by stdin.
528
529 """
530 fname = opts.tags_source
531 if fname is None:
532 return
533 if fname == "-":
534 new_fh = sys.stdin
535 else:
536 new_fh = open(fname, "r")
537 new_data = []
538 try:
539 # we don't use the nice 'new_data = [line.strip() for line in fh]'
540 # because of python bug 1633941
541 while True:
542 line = new_fh.readline()
543 if not line:
544 break
545 new_data.append(line.strip())
546 finally:
547 new_fh.close()
548 args.extend(new_data)
549
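# Example (assumed input, for illustration only): if opts.tags_source names a
# file containing the lines "web" and "production", _ExtendTags appends
# ["web", "production"] to args; a tags_source of "-" reads the same data from
# stdin instead.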
550
551 def ListTags(opts, args):
552 """List the tags on a given object.
553
554 This is a generic implementation that knows how to deal with all
555 three cases of tag objects (cluster, node, instance). The opts
556 argument is expected to contain a tag_type field denoting what
557 object type we work on.
558
559 """
560 kind, name = _ExtractTagsObject(opts, args)
561 cl = GetClient()
562 result = cl.QueryTags(kind, name)
563 result = list(result)
564 result.sort()
565 for tag in result:
566 ToStdout(tag)
567
568
569 def AddTags(opts, args):
570 """Add tags on a given object.
571
572 This is a generic implementation that knows how to deal with all
573 three cases of tag objects (cluster, node, instance). The opts
574 argument is expected to contain a tag_type field denoting what
575 object type we work on.
576
577 """
578 kind, name = _ExtractTagsObject(opts, args)
579 _ExtendTags(opts, args)
580 if not args:
581 raise errors.OpPrereqError("No tags to be added", errors.ECODE_INVAL)
582 op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
583 SubmitOrSend(op, opts)
584
585
586 def RemoveTags(opts, args):
587 """Remove tags from a given object.
588
589 This is a generic implementation that knows how to deal with all
590 three cases of tag objects (cluster, node, instance). The opts
591 argument is expected to contain a tag_type field denoting what
592 object type we work on.
593
594 """
595 kind, name = _ExtractTagsObject(opts, args)
596 _ExtendTags(opts, args)
597 if not args:
598 raise errors.OpPrereqError("No tags to be removed", errors.ECODE_INVAL)
599 op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
600 SubmitOrSend(op, opts)
601
602
603 def check_unit(option, opt, value): # pylint: disable=W0613
604   """Custom optparse converter for units.
605
606 """
607 try:
608 return utils.ParseUnit(value)
609 except errors.UnitParseError, err:
610 raise OptionValueError("option %s: %s" % (opt, err))
611
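# Illustrative note (assumption, not from the original file): utils.ParseUnit
# normalizes sizes to mebibytes, so an option declared with type="unit" below
# (e.g. OS_SIZE_OPT) is expected to turn "512" or "512M" into 512 and "4G"
# into 4096.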
612
613 def _SplitKeyVal(opt, data, parse_prefixes):
614 """Convert a KeyVal string into a dict.
615
616 This function will convert a key=val[,...] string into a dict. Empty
617 values will be converted specially: keys which have the prefix 'no_'
618 will have the value=False and the prefix stripped, keys with the prefix
619 "-" will have value=None and the prefix stripped, and the others will
620 have value=True.
621
622 @type opt: string
623 @param opt: a string holding the option name for which we process the
624 data, used in building error messages
625 @type data: string
626 @param data: a string of the format key=val,key=val,...
627 @type parse_prefixes: bool
628 @param parse_prefixes: whether to handle prefixes specially
629 @rtype: dict
630 @return: {key=val, key=val}
631 @raises errors.ParameterError: if there are duplicate keys
632
633 """
634 kv_dict = {}
635 if data:
636 for elem in utils.UnescapeAndSplit(data, sep=","):
637 if "=" in elem:
638 key, val = elem.split("=", 1)
639 elif parse_prefixes:
640 if elem.startswith(NO_PREFIX):
641 key, val = elem[len(NO_PREFIX):], False
642 elif elem.startswith(UN_PREFIX):
643 key, val = elem[len(UN_PREFIX):], None
644 else:
645 key, val = elem, True
646 else:
647 raise errors.ParameterError("Missing value for key '%s' in option %s" %
648 (elem, opt))
649 if key in kv_dict:
650 raise errors.ParameterError("Duplicate key '%s' in option %s" %
651 (key, opt))
652 kv_dict[key] = val
653 return kv_dict
654
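# Minimal sketch of the parsing rules above (illustration only):
#
#   _SplitKeyVal("-B", "memory=512,no_auto_balance,-vcpus", True)
#     -> {"memory": "512", "auto_balance": False, "vcpus": None}
#
# Values are kept as strings; the "no_" and "-" prefixes are only interpreted
# when parse_prefixes is True.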
655
656 def _SplitIdentKeyVal(opt, value, parse_prefixes):
657 """Helper function to parse "ident:key=val,key=val" options.
658
659 @type opt: string
660 @param opt: option name, used in error messages
661 @type value: string
662 @param value: expected to be in the format "ident:key=val,key=val,..."
663 @type parse_prefixes: bool
664 @param parse_prefixes: whether to handle prefixes specially (see
665 L{_SplitKeyVal})
666 @rtype: tuple
667 @return: (ident, {key=val, key=val})
668 @raises errors.ParameterError: in case of duplicates or other parsing errors
669
670 """
671 if ":" not in value:
672 ident, rest = value, ""
673 else:
674 ident, rest = value.split(":", 1)
675
676 if parse_prefixes and ident.startswith(NO_PREFIX):
677 if rest:
678 msg = "Cannot pass options when removing parameter groups: %s" % value
679 raise errors.ParameterError(msg)
680 retval = (ident[len(NO_PREFIX):], False)
681 elif (parse_prefixes and ident.startswith(UN_PREFIX) and
682 (len(ident) <= len(UN_PREFIX) or not ident[len(UN_PREFIX)].isdigit())):
683 if rest:
684 msg = "Cannot pass options when removing parameter groups: %s" % value
685 raise errors.ParameterError(msg)
686 retval = (ident[len(UN_PREFIX):], None)
687 else:
688 kv_dict = _SplitKeyVal(opt, rest, parse_prefixes)
689 retval = (ident, kv_dict)
690 return retval
691
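# Minimal sketch (illustration only): a value such as "disk0:size=10G,mode=rw"
# yields ("disk0", {"size": "10G", "mode": "rw"}), while "no_disk0" (with
# parse_prefixes True) yields ("disk0", False), which is how whole parameter
# groups are removed.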
692
693 def check_ident_key_val(option, opt, value): # pylint: disable=W0613
694 """Custom parser for ident:key=val,key=val options.
695
696 This will store the parsed values as a tuple (ident, {key: val}). As such,
697 multiple uses of this option via action=append is possible.
698
699 """
700 return _SplitIdentKeyVal(opt, value, True)
701
702
703 def check_key_val(option, opt, value): # pylint: disable=W0613
704   """Custom parser for key=val,key=val options.
705
706 This will store the parsed values as a dict {key: val}.
707
708 """
709 return _SplitKeyVal(opt, value, True)
710
711
712 def check_key_private_val(option, opt, value): # pylint: disable=W0613
713   """Custom parser for private and secret key=val,key=val options.
714
715 This will store the parsed values as a dict {key: val}.
716
717 """
718 return serializer.PrivateDict(_SplitKeyVal(opt, value, True))
719
720
721 def _SplitListKeyVal(opt, value):
722 retval = {}
723 for elem in value.split("/"):
724 if not elem:
725 raise errors.ParameterError("Empty section in option '%s'" % opt)
726 (ident, valdict) = _SplitIdentKeyVal(opt, elem, False)
727 if ident in retval:
728 msg = ("Duplicated parameter '%s' in parsing %s: %s" %
729 (ident, opt, elem))
730 raise errors.ParameterError(msg)
731 retval[ident] = valdict
732 return retval
733
734
735 def check_multilist_ident_key_val(_, opt, value):
736 """Custom parser for "ident:key=val,key=val/ident:key=val//ident:.." options.
737
738 @rtype: list of dictionary
739 @return: [{ident: {key: val, key: val}, ident: {key: val}}, {ident:..}]
740
741 """
742 retval = []
743 for line in value.split("//"):
744 retval.append(_SplitListKeyVal(opt, line))
745 return retval
746
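# Minimal sketch (illustration only): a value such as
# "plain:stripes=2/drbd:metavg=xenvg//plain:stripes=4" parses into
#   [{"plain": {"stripes": "2"}, "drbd": {"metavg": "xenvg"}},
#    {"plain": {"stripes": "4"}}]
# i.e. "//" separates list entries and "/" separates ident:key=val groups
# within one entry.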
747
748 def check_bool(option, opt, value): # pylint: disable=W0613
749 """Custom parser for yes/no options.
750
751 This will store the parsed value as either True or False.
752
753 """
754 value = value.lower()
755 if value == constants.VALUE_FALSE or value == "no":
756 return False
757 elif value == constants.VALUE_TRUE or value == "yes":
758 return True
759 else:
760 raise errors.ParameterError("Invalid boolean value '%s'" % value)
761
762
763 def check_list(option, opt, value): # pylint: disable=W0613
764 """Custom parser for comma-separated lists.
765
766 """
767 # we have to make this explicit check since "".split(",") is [""],
768 # not an empty list :(
769 if not value:
770 return []
771 else:
772 return utils.UnescapeAndSplit(value)
773
774
775 def check_maybefloat(option, opt, value): # pylint: disable=W0613
776 """Custom parser for float numbers which might be also defaults.
777
778 """
779 value = value.lower()
780
781 if value == constants.VALUE_DEFAULT:
782 return value
783 else:
784 return float(value)
785
786
787 # completion_suggest is normally a list. Numeric values that do not evaluate
788 # to False are used to mark dynamic completion.
789 (OPT_COMPL_MANY_NODES,
790 OPT_COMPL_ONE_NODE,
791 OPT_COMPL_ONE_INSTANCE,
792 OPT_COMPL_ONE_OS,
793 OPT_COMPL_ONE_EXTSTORAGE,
794 OPT_COMPL_ONE_IALLOCATOR,
795 OPT_COMPL_ONE_NETWORK,
796 OPT_COMPL_INST_ADD_NODES,
797 OPT_COMPL_ONE_NODEGROUP) = range(100, 109)
798
799 OPT_COMPL_ALL = compat.UniqueFrozenset([
800 OPT_COMPL_MANY_NODES,
801 OPT_COMPL_ONE_NODE,
802 OPT_COMPL_ONE_INSTANCE,
803 OPT_COMPL_ONE_OS,
804 OPT_COMPL_ONE_EXTSTORAGE,
805 OPT_COMPL_ONE_IALLOCATOR,
806 OPT_COMPL_ONE_NETWORK,
807 OPT_COMPL_INST_ADD_NODES,
808 OPT_COMPL_ONE_NODEGROUP,
809 ])
810
811
812 class CliOption(Option):
813 """Custom option class for optparse.
814
815 """
816 ATTRS = Option.ATTRS + [
817 "completion_suggest",
818 ]
819 TYPES = Option.TYPES + (
820 "multilistidentkeyval",
821 "identkeyval",
822 "keyval",
823 "keyprivateval",
824 "unit",
825 "bool",
826 "list",
827 "maybefloat",
828 )
829 TYPE_CHECKER = Option.TYPE_CHECKER.copy()
830 TYPE_CHECKER["multilistidentkeyval"] = check_multilist_ident_key_val
831 TYPE_CHECKER["identkeyval"] = check_ident_key_val
832 TYPE_CHECKER["keyval"] = check_key_val
833 TYPE_CHECKER["keyprivateval"] = check_key_private_val
834 TYPE_CHECKER["unit"] = check_unit
835 TYPE_CHECKER["bool"] = check_bool
836 TYPE_CHECKER["list"] = check_list
837 TYPE_CHECKER["maybefloat"] = check_maybefloat
838
839
840 # optparse.py sets make_option, so we do it for our own option class, too
841 cli_option = CliOption
842
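# Usage sketch (illustration only, derived from the declarations further down):
# the custom types registered above let an option be declared as e.g.
#
#   BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
#                            type="keyval", default={}, help="Backend parameters")
#
# so that a command line containing "-B memory=512,auto_balance" parses into
# options.beparams == {"memory": "512", "auto_balance": True}.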
843
844 _YORNO = "yes|no"
845
846 DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
847 help="Increase debugging level")
848
849 NOHDR_OPT = cli_option("--no-headers", default=False,
850 action="store_true", dest="no_headers",
851 help="Don't display column headers")
852
853 SEP_OPT = cli_option("--separator", default=None,
854 action="store", dest="separator",
855 help=("Separator between output fields"
856 " (defaults to one space)"))
857
858 USEUNITS_OPT = cli_option("--units", default=None,
859 dest="units", choices=("h", "m", "g", "t"),
860 help="Specify units for output (one of h/m/g/t)")
861
862 FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
863 type="string", metavar="FIELDS",
864 help="Comma separated list of output fields")
865
866 FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
867 default=False, help="Force the operation")
868
869 CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
870 default=False, help="Do not require confirmation")
871
872 IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
873 action="store_true", default=False,
874 help=("Ignore offline nodes and do as much"
875 " as possible"))
876
877 TAG_ADD_OPT = cli_option("--tags", dest="tags",
878 default=None, help="Comma-separated list of instance"
879 " tags")
880
881 TAG_SRC_OPT = cli_option("--from", dest="tags_source",
882 default=None, help="File with tag names")
883
884 SUBMIT_OPT = cli_option("--submit", dest="submit_only",
885 default=False, action="store_true",
886 help=("Submit the job and return the job ID, but"
887 " don't wait for the job to finish"))
888
889 PRINT_JOBID_OPT = cli_option("--print-jobid", dest="print_jobid",
890 default=False, action="store_true",
891                              help=("Additionally print the job ID as the first line"
892 " on stdout (for scripting)."))
893
894 SEQUENTIAL_OPT = cli_option("--sequential", dest="sequential",
895 default=False, action="store_true",
896 help=("Execute all resulting jobs sequentially"))
897
898 SYNC_OPT = cli_option("--sync", dest="do_locking",
899 default=False, action="store_true",
900 help=("Grab locks while doing the queries"
901 " in order to ensure more consistent results"))
902
903 DRY_RUN_OPT = cli_option("--dry-run", default=False,
904 action="store_true",
905 help=("Do not execute the operation, just run the"
906 " check steps and verify if it could be"
907 " executed"))
908
909 VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
910 action="store_true",
911 help="Increase the verbosity of the operation")
912
913 DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
914 action="store_true", dest="simulate_errors",
915 help="Debugging option that makes the operation"
916 " treat most runtime checks as failed")
917
918 NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
919 default=True, action="store_false",
920 help="Don't wait for sync (DANGEROUS!)")
921
922 WFSYNC_OPT = cli_option("--wait-for-sync", dest="wait_for_sync",
923 default=False, action="store_true",
924 help="Wait for disks to sync")
925
926 ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
927 action="store_true", default=False,
928                              help="Set an offline instance back online")
929
930 OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
931 action="store_true", default=False,
932                               help="Mark a stopped instance as offline")
933
934 DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
935 help=("Custom disk setup (%s)" %
936 utils.CommaJoin(constants.DISK_TEMPLATES)),
937 default=None, metavar="TEMPL",
938 choices=list(constants.DISK_TEMPLATES))
939
940 NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
941 help="Do not create any network cards for"
942 " the instance")
943
944 FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
945 help="Relative path under default cluster-wide"
946 " file storage dir to store file-based disks",
947 default=None, metavar="<DIR>")
948
949 FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
950 help="Driver to use for image files",
951 default=None, metavar="<DRIVER>",
952 choices=list(constants.FILE_DRIVER))
953
954 IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
955 help="Select nodes for the instance automatically"
956 " using the <NAME> iallocator plugin",
957 default=None, type="string",
958 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
959
960 DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
961 metavar="<NAME>",
962 help="Set the default instance"
963 " allocator plugin",
964 default=None, type="string",
965 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
966
967 DEFAULT_IALLOCATOR_PARAMS_OPT = cli_option("--default-iallocator-params",
968 dest="default_iallocator_params",
969 help="iallocator template"
970 " parameters, in the format"
971 " template:option=value,"
972 " option=value,...",
973 type="keyval",
974 default=None)
975
976 OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
977 metavar="<os>",
978 completion_suggest=OPT_COMPL_ONE_OS)
979
980 OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
981 type="keyval", default={},
982 help="OS parameters")
983
984 OSPARAMS_PRIVATE_OPT = cli_option("--os-parameters-private",
985 dest="osparams_private",
986 type="keyprivateval",
987 default=serializer.PrivateDict(),
988 help="Private OS parameters"
989 " (won't be logged)")
990
991 OSPARAMS_SECRET_OPT = cli_option("--os-parameters-secret",
992 dest="osparams_secret",
993 type="keyprivateval",
994 default=serializer.PrivateDict(),
995 help="Secret OS parameters (won't be logged or"
996 " saved; you must supply these for every"
997 " operation.)")
998
999 FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
1000 action="store_true", default=False,
1001 help="Force an unknown variant")
1002
1003 NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
1004 action="store_true", default=False,
1005 help="Do not install the OS (will"
1006 " enable no-start)")
1007
1008 NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes",
1009 dest="allow_runtime_chgs",
1010 default=True, action="store_false",
1011 help="Don't allow runtime changes")
1012
1013 BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
1014 type="keyval", default={},
1015 help="Backend parameters")
1016
1017 HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
1018 default={}, dest="hvparams",
1019 help="Hypervisor parameters")
1020
1021 DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
1022 help="Disk template parameters, in the format"
1023 " template:option=value,option=value,...",
1024 type="identkeyval", action="append", default=[])
1025
1026 SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
1027 type="keyval", default={},
1028 help="Memory size specs: list of key=value,"
1029 " where key is one of min, max, std"
1030 " (in MB or using a unit)")
1031
1032 SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
1033 type="keyval", default={},
1034 help="CPU count specs: list of key=value,"
1035 " where key is one of min, max, std")
1036
1037 SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
1038 dest="ispecs_disk_count",
1039 type="keyval", default={},
1040 help="Disk count specs: list of key=value,"
1041 " where key is one of min, max, std")
1042
1043 SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
1044 type="keyval", default={},
1045 help="Disk size specs: list of key=value,"
1046 " where key is one of min, max, std"
1047 " (in MB or using a unit)")
1048
1049 SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
1050 type="keyval", default={},
1051 help="NIC count specs: list of key=value,"
1052 " where key is one of min, max, std")
1053
1054 IPOLICY_BOUNDS_SPECS_STR = "--ipolicy-bounds-specs"
1055 IPOLICY_BOUNDS_SPECS_OPT = cli_option(IPOLICY_BOUNDS_SPECS_STR,
1056 dest="ipolicy_bounds_specs",
1057 type="multilistidentkeyval", default=None,
1058 help="Complete instance specs limits")
1059
1060 IPOLICY_STD_SPECS_STR = "--ipolicy-std-specs"
1061 IPOLICY_STD_SPECS_OPT = cli_option(IPOLICY_STD_SPECS_STR,
1062 dest="ipolicy_std_specs",
1063 type="keyval", default=None,
1064 help="Complete standard instance specs")
1065
1066 IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
1067 dest="ipolicy_disk_templates",
1068 type="list", default=None,
1069 help="Comma-separated list of"
1070 " enabled disk templates")
1071
1072 IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
1073 dest="ipolicy_vcpu_ratio",
1074 type="maybefloat", default=None,
1075 help="The maximum allowed vcpu-to-cpu ratio")
1076
1077 IPOLICY_SPINDLE_RATIO = cli_option("--ipolicy-spindle-ratio",
1078 dest="ipolicy_spindle_ratio",
1079 type="maybefloat", default=None,
1080                                    help=("The maximum allowed instance-to-"
1081                                          "spindle ratio"))
1082
1083 HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
1084 help="Hypervisor and hypervisor options, in the"
1085 " format hypervisor:option=value,option=value,...",
1086 default=None, type="identkeyval")
1087
1088 HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
1089 help="Hypervisor and hypervisor options, in the"
1090 " format hypervisor:option=value,option=value,...",
1091 default=[], action="append", type="identkeyval")
1092
1093 NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
1094 action="store_false",
1095 help="Don't check that the instance's IP"
1096 " is alive")
1097
1098 NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
1099 default=True, action="store_false",
1100 help="Don't check that the instance's name"
1101 " is resolvable")
1102
1103 NET_OPT = cli_option("--net",
1104 help="NIC parameters", default=[],
1105 dest="nics", action="append", type="identkeyval")
1106
1107 DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
1108 dest="disks", action="append", type="identkeyval")
1109
1110 DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
1111                          help="Comma-separated list of disk"
1112 " indices to act on (e.g. 0,2) (optional,"
1113 " defaults to all disks)")
1114
1115 OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
1116 help="Enforces a single-disk configuration using the"
1117 " given disk size, in MiB unless a suffix is used",
1118 default=None, type="unit", metavar="<size>")
1119
1120 IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
1121 dest="ignore_consistency",
1122 action="store_true", default=False,
1123 help="Ignore the consistency of the disks on"
1124 " the secondary")
1125
1126 ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
1127 dest="allow_failover",
1128 action="store_true", default=False,
1129 help="If migration is not possible fallback to"
1130 " failover")
1131
1132 FORCE_FAILOVER_OPT = cli_option("--force-failover",
1133 dest="force_failover",
1134 action="store_true", default=False,
1135 help="Do not use migration, always use"
1136 " failover")
1137
1138 NONLIVE_OPT = cli_option("--non-live", dest="live",
1139 default=True, action="store_false",
1140 help="Do a non-live migration (this usually means"
1141 " freeze the instance, save the state, transfer and"
1142 " only then resume running on the secondary node)")
1143
1144 MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
1145 default=None,
1146 choices=list(constants.HT_MIGRATION_MODES),
1147 help="Override default migration mode (choose"
1148                                 " either live or non-live)")
1149
1150 NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
1151 help="Target node and optional secondary node",
1152 metavar="<pnode>[:<snode>]",
1153 completion_suggest=OPT_COMPL_INST_ADD_NODES)
1154
1155 NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
1156 action="append", metavar="<node>",
1157 help="Use only this node (can be used multiple"
1158 " times, if not given defaults to all nodes)",
1159 completion_suggest=OPT_COMPL_ONE_NODE)
1160
1161 NODEGROUP_OPT_NAME = "--node-group"
1162 NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
1163 dest="nodegroup",
1164 help="Node group (name or uuid)",
1165 metavar="<nodegroup>",
1166 default=None, type="string",
1167 completion_suggest=OPT_COMPL_ONE_NODEGROUP)
1168
1169 SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
1170 metavar="<node>",
1171 completion_suggest=OPT_COMPL_ONE_NODE)
1172
1173 NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
1174 action="store_false",
1175 help="Don't start the instance after creation")
1176
1177 SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
1178 action="store_true", default=False,
1179 help="Show command instead of executing it")
1180
1181 CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
1182 default=False, action="store_true",
1183 help="Instead of performing the migration/failover,"
1184 " try to recover from a failed cleanup. This is safe"
1185 " to run even if the instance is healthy, but it"
1186                          " will create extra replication traffic and"
1187                          " briefly disrupt replication (as during the"
1188                          " migration/failover)")
1189
1190 STATIC_OPT = cli_option("-s", "--static", dest="static",
1191 action="store_true", default=False,
1192 help="Only show configuration data, not runtime data")
1193
1194 ALL_OPT = cli_option("--all", dest="show_all",
1195 default=False, action="store_true",
1196 help="Show info on all instances on the cluster."
1197                      " This can take a long time to run; use wisely")
1198
1199 SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
1200 action="store_true", default=False,
1201 help="Interactive OS reinstall, lists available"
1202 " OS templates for selection")
1203
1204 IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
1205 action="store_true", default=False,
1206 help="Remove the instance from the cluster"
1207 " configuration even if there are failures"
1208 " during the removal process")
1209
1210 IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
1211 dest="ignore_remove_failures",
1212 action="store_true", default=False,
1213 help="Remove the instance from the"
1214 " cluster configuration even if there"
1215 " are failures during the removal"
1216 " process")
1217
1218 REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
1219 action="store_true", default=False,
1220 help="Remove the instance from the cluster")
1221
1222 DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
1223 help="Specifies the new node for the instance",
1224 metavar="NODE", default=None,
1225 completion_suggest=OPT_COMPL_ONE_NODE)
1226
1227 NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
1228 help="Specifies the new secondary node",
1229 metavar="NODE", default=None,
1230 completion_suggest=OPT_COMPL_ONE_NODE)
1231
1232 NEW_PRIMARY_OPT = cli_option("--new-primary", dest="new_primary_node",
1233 help="Specifies the new primary node",
1234 metavar="<node>", default=None,
1235 completion_suggest=OPT_COMPL_ONE_NODE)
1236
1237 ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
1238 default=False, action="store_true",
1239 help="Replace the disk(s) on the primary"
1240 " node (applies only to internally mirrored"
1241 " disk templates, e.g. %s)" %
1242 utils.CommaJoin(constants.DTS_INT_MIRROR))
1243
1244 ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
1245 default=False, action="store_true",
1246 help="Replace the disk(s) on the secondary"
1247 " node (applies only to internally mirrored"
1248 " disk templates, e.g. %s)" %
1249 utils.CommaJoin(constants.DTS_INT_MIRROR))
1250
1251 AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
1252 default=False, action="store_true",
1253 help="Lock all nodes and auto-promote as needed"
1254 " to MC status")
1255
1256 AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
1257 default=False, action="store_true",
1258 help="Automatically replace faulty disks"
1259 " (applies only to internally mirrored"
1260 " disk templates, e.g. %s)" %
1261 utils.CommaJoin(constants.DTS_INT_MIRROR))
1262
1263 IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
1264 default=False, action="store_true",
1265 help="Ignore current recorded size"
1266 " (useful for forcing activation when"
1267 " the recorded size is wrong)")
1268
1269 SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
1270 metavar="<node>",
1271 completion_suggest=OPT_COMPL_ONE_NODE)
1272
1273 SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
1274 metavar="<dir>")
1275
1276 SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
1277 help="Specify the secondary ip for the node",
1278 metavar="ADDRESS", default=None)
1279
1280 READD_OPT = cli_option("--readd", dest="readd",
1281 default=False, action="store_true",
1282 help="Readd old node after replacing it")
1283
1284 NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
1285 default=True, action="store_false",
1286 help="Disable SSH key fingerprint checking")
1287
1288 NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
1289 default=False, action="store_true",
1290 help="Force the joining of a node")
1291
1292 MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
1293 type="bool", default=None, metavar=_YORNO,
1294 help="Set the master_candidate flag on the node")
1295
1296 OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
1297 type="bool", default=None,
1298 help=("Set the offline flag on the node"
1299 " (cluster does not communicate with offline"
1300 " nodes)"))
1301
1302 DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
1303 type="bool", default=None,
1304 help=("Set the drained flag on the node"
1305 " (excluded from allocation operations)"))
1306
1307 CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
1308 type="bool", default=None, metavar=_YORNO,
1309 help="Set the master_capable flag on the node")
1310
1311 CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
1312 type="bool", default=None, metavar=_YORNO,
1313 help="Set the vm_capable flag on the node")
1314
1315 ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
1316 type="bool", default=None, metavar=_YORNO,
1317 help="Set the allocatable flag on a volume")
1318
1319 ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
1320 dest="enabled_hypervisors",
1321 help="Comma-separated list of hypervisors",
1322 type="string", default=None)
1323
1324 ENABLED_DISK_TEMPLATES_OPT = cli_option("--enabled-disk-templates",
1325 dest="enabled_disk_templates",
1326 help="Comma-separated list of "
1327 "disk templates",
1328 type="string", default=None)
1329
1330 ENABLED_USER_SHUTDOWN_OPT = cli_option("--user-shutdown",
1331 default=None,
1332 dest="enabled_user_shutdown",
1333 help="Whether user shutdown is enabled",
1334 type="bool")
1335
1336 NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
1337 type="keyval", default={},
1338 help="NIC parameters")
1339
1340 CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
1341 dest="candidate_pool_size", type="int",
1342 help="Set the candidate pool size")
1343
1344 RQL_OPT = cli_option("--max-running-jobs", dest="max_running_jobs",
1345 type="int", help="Set the maximal number of jobs to "
1346 "run simultaneously")
1347
1348 MAX_TRACK_OPT = cli_option("--max-tracked-jobs", dest="max_tracked_jobs",
1349 type="int", help="Set the maximal number of jobs to "
1350 "be tracked simultaneously for "
1351 "scheduling")
1352
1353 COMPRESSION_TOOLS_OPT = \
1354 cli_option("--compression-tools",
1355 dest="compression_tools", type="string", default=None,
1356 help="Comma-separated list of compression tools which are"
1357 " allowed to be used by Ganeti in various operations")
1358
1359 VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
1360 help=("Enables LVM and specifies the volume group"
1361 " name (cluster-wide) for disk allocation"
1362 " [%s]" % constants.DEFAULT_VG),
1363 metavar="VG", default=None)
1364
1365 YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
1366 help="Destroy cluster", action="store_true")
1367
1368 NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
1369 help="Skip node agreement check (dangerous)",
1370 action="store_true", default=False)
1371
1372 MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
1373                             help="Specify the MAC prefix for the instances'"
1374                             " MAC addresses, in the format XX:XX:XX",
1375 metavar="PREFIX",
1376 default=None)
1377
1378 MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
1379 help="Specify the node interface (cluster-wide)"
1380 " on which the master IP address will be added"
1381 " (cluster init default: %s)" %
1382 constants.DEFAULT_BRIDGE,
1383 metavar="NETDEV",
1384 default=None)
1385
1386 MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
1387 help="Specify the netmask of the master IP",
1388 metavar="NETMASK",
1389 default=None)
1390
1391 USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
1392 dest="use_external_mip_script",
1393 help="Specify whether to run a"
1394 " user-provided script for the master"
1395 " IP address turnup and"
1396 " turndown operations",
1397 type="bool", metavar=_YORNO, default=None)
1398
1399 GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
1400 help="Specify the default directory (cluster-"
1401 "wide) for storing the file-based disks [%s]" %
1402 pathutils.DEFAULT_FILE_STORAGE_DIR,
1403 metavar="DIR",
1404 default=None)
1405
1406 GLOBAL_SHARED_FILEDIR_OPT = cli_option(
1407 "--shared-file-storage-dir",
1408 dest="shared_file_storage_dir",
1409 help="Specify the default directory (cluster-wide) for storing the"
1410 " shared file-based disks [%s]" %
1411 pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR,
1412 metavar="SHAREDDIR", default=None)
1413
1414 GLOBAL_GLUSTER_FILEDIR_OPT = cli_option(
1415 "--gluster-storage-dir",
1416 dest="gluster_storage_dir",
1417 help="Specify the default directory (cluster-wide) for mounting Gluster"
1418 " file systems [%s]" %
1419 pathutils.DEFAULT_GLUSTER_STORAGE_DIR,
1420 metavar="GLUSTERDIR",
1421 default=pathutils.DEFAULT_GLUSTER_STORAGE_DIR)
1422
1423 NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
1424 help="Don't modify %s" % pathutils.ETC_HOSTS,
1425 action="store_false", default=True)
1426
1427 MODIFY_ETCHOSTS_OPT = \
1428 cli_option("--modify-etc-hosts", dest="modify_etc_hosts", metavar=_YORNO,
1429 default=None, type="bool",
1430 help="Defines whether the cluster should autonomously modify"
1431              " the /etc/hosts file of the nodes and keep it in sync")
1432
1433 NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
1434 help="Don't initialize SSH keys",
1435 action="store_false", default=True)
1436
1437 ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
1438 help="Enable parseable error messages",
1439 action="store_true", default=False)
1440
1441 NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
1442 help="Skip N+1 memory redundancy tests",
1443 action="store_true", default=False)
1444
1445 REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
1446 help="Type of reboot: soft/hard/full",
1447 default=constants.INSTANCE_REBOOT_HARD,
1448 metavar="<REBOOT>",
1449 choices=list(constants.REBOOT_TYPES))
1450
1451 IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
1452 dest="ignore_secondaries",
1453 default=False, action="store_true",
1454 help="Ignore errors from secondaries")
1455
1456 NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
1457 action="store_false", default=True,
1458 help="Don't shutdown the instance (unsafe)")
1459
1460 TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
1461 default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1462 help="Maximum time to wait")
1463
1464 COMPRESS_OPT = cli_option("--compress", dest="compress",
1465 type="string", default=constants.IEC_NONE,
1466 help="The compression mode to use")
1467
1468 TRANSPORT_COMPRESSION_OPT = \
1469 cli_option("--transport-compression", dest="transport_compression",
1470 type="string", default=constants.IEC_NONE,
1471 help="The compression mode to use during transport")
1472
1473 SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
1474 dest="shutdown_timeout", type="int",
1475 default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1476 help="Maximum time to wait for instance"
1477 " shutdown")
1478
1479 INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
1480 default=None,
1481                           help=("Number of seconds between repetitions of the"
1482 " command"))
1483
1484 EARLY_RELEASE_OPT = cli_option("--early-release",
1485 dest="early_release", default=False,
1486 action="store_true",
1487 help="Release the locks on the secondary"
1488 " node(s) early")
1489
1490 NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
1491 dest="new_cluster_cert",
1492 default=False, action="store_true",
1493 help="Generate a new cluster certificate")
1494
1495 NEW_NODE_CERT_OPT = cli_option(
1496 "--new-node-certificates", dest="new_node_cert", default=False,
1497 action="store_true", help="Generate new node certificates (for all nodes)")
1498
1499 RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
1500 default=None,
1501 help="File containing new RAPI certificate")
1502
1503 NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
1504 default=None, action="store_true",
1505 help=("Generate a new self-signed RAPI"
1506 " certificate"))
1507
1508 SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
1509 default=None,
1510 help="File containing new SPICE certificate")
1511
1512 SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
1513 default=None,
1514 help="File containing the certificate of the CA"
1515 " which signed the SPICE certificate")
1516
1517 NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
1518 dest="new_spice_cert", default=None,
1519 action="store_true",
1520 help=("Generate a new self-signed SPICE"
1521 " certificate"))
1522
1523 NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
1524 dest="new_confd_hmac_key",
1525 default=False, action="store_true",
1526 help=("Create a new HMAC key for %s" %
1527 constants.CONFD))
1528
1529 CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
1530 dest="cluster_domain_secret",
1531 default=None,
1532                                        help=("Load new cluster domain"
1533 " secret from file"))
1534
1535 NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
1536 dest="new_cluster_domain_secret",
1537 default=False, action="store_true",
1538 help=("Create a new cluster domain"
1539 " secret"))
1540
1541 USE_REPL_NET_OPT = cli_option("--use-replication-network",
1542 dest="use_replication_network",
1543 help="Whether to use the replication network"
1544 " for talking to the nodes",
1545 action="store_true", default=False)
1546
1547 MAINTAIN_NODE_HEALTH_OPT = \
1548 cli_option("--maintain-node-health", dest="maintain_node_health",
1549 metavar=_YORNO, default=None, type="bool",
1550 help="Configure the cluster to automatically maintain node"
1551 " health, by shutting down unknown instances, shutting down"
1552 " unknown DRBD devices, etc.")
1553
1554 IDENTIFY_DEFAULTS_OPT = \
1555 cli_option("--identify-defaults", dest="identify_defaults",
1556 default=False, action="store_true",
1557 help="Identify which saved instance parameters are equal to"
1558 " the current cluster defaults and set them as such, instead"
1559 " of marking them as overridden")
1560
1561 UIDPOOL_OPT = cli_option("--uid-pool", default=None,
1562 action="store", dest="uid_pool",
1563 help=("A list of user-ids or user-id"
1564 " ranges separated by commas"))
1565
1566 ADD_UIDS_OPT = cli_option("--add-uids", default=None,
1567 action="store", dest="add_uids",
1568 help=("A list of user-ids or user-id"
1569 " ranges separated by commas, to be"
1570 " added to the user-id pool"))
1571
1572 REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
1573 action="store", dest="remove_uids",
1574 help=("A list of user-ids or user-id"
1575 " ranges separated by commas, to be"
1576 " removed from the user-id pool"))
1577
1578 RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
1579 action="store", dest="reserved_lvs",
1580 help=("A comma-separated list of reserved"
1581                                     " logical volume names that will be"
1582 " ignored by cluster verify"))
1583
1584 ROMAN_OPT = cli_option("--roman",
1585 dest="roman_integers", default=False,
1586 action="store_true",
1587                        help="Use Roman numerals for positive integers")
1588
1589 DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
1590 action="store", default=None,
1591 help="Specifies usermode helper for DRBD")
1592
1593 PRIMARY_IP_VERSION_OPT = \
1594 cli_option("--primary-ip-version", default=constants.IP4_VERSION,
1595 action="store", dest="primary_ip_version",
1596 metavar="%d|%d" % (constants.IP4_VERSION,
1597 constants.IP6_VERSION),
1598 help="Cluster-wide IP version for primary IP")
1599
1600 SHOW_MACHINE_OPT = cli_option("-M", "--show-machine-names", default=False,
1601 action="store_true",
1602 help="Show machine name for every line in output")
1603
1604 FAILURE_ONLY_OPT = cli_option("--failure-only", default=False,
1605 action="store_true",
1606 help=("Hide successful results and show failures"
1607 " only (determined by the exit code)"))
1608
1609 REASON_OPT = cli_option("--reason", default=None,
1610 help="The reason for executing the command")
1611
1612
1613 def _PriorityOptionCb(option, _, value, parser):
1614 """Callback for processing C{--priority} option.
1615
1616 """
1617 value = _PRIONAME_TO_VALUE[value]
1618
1619 setattr(parser.values, option.dest, value)
1620
1621
1622 PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
1623 metavar="|".join(name for name, _ in _PRIORITY_NAMES),
1624 choices=_PRIONAME_TO_VALUE.keys(),
1625 action="callback", type="choice",
1626 callback=_PriorityOptionCb,
1627 help="Priority for opcode processing")
1628
1629 OPPORTUNISTIC_OPT = cli_option("--opportunistic-locking",
1630 dest="opportunistic_locking",
1631 action="store_true", default=False,
1632 help="Opportunistically acquire locks")
1633
1634 HID_OS_OPT = cli_option("--hidden", dest="hidden",
1635 type="bool", default=None, metavar=_YORNO,
1636 help="Sets the hidden flag on the OS")
1637
1638 BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
1639 type="bool", default=None, metavar=_YORNO,
1640 help="Sets the blacklisted flag on the OS")
1641
1642 PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
1643 type="bool", metavar=_YORNO,
1644 dest="prealloc_wipe_disks",
1645 help=("Wipe disks prior to instance"
1646 " creation"))
1647
1648 NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
1649 type="keyval", default=None,
1650 help="Node parameters")
1651
1652 ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
1653 action="store", metavar="POLICY", default=None,
1654 help="Allocation policy for the node group")
1655
1656 NODE_POWERED_OPT = cli_option("--node-powered", default=None,
1657 type="bool", metavar=_YORNO,
1658 dest="node_powered",
1659                               help="Specify whether the node's State of Record (SoR) is powered")
1660
1661 OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
1662 default=constants.OOB_TIMEOUT,
1663 help="Maximum time to wait for out-of-band helper")
1664
1665 POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
1666 default=constants.OOB_POWER_DELAY,
1667 help="Time in seconds to wait between power-ons")
1668
1669 FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
1670 action="store_true", default=False,
1671                               help=("Whether the command argument should be"
1672                                     " treated as a filter"))
1673
1674 NO_REMEMBER_OPT = cli_option("--no-remember",
1675 dest="no_remember",
1676 action="store_true", default=False,
1677 help="Perform but do not record the change"
1678 " in the configuration")
1679
1680 PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
1681 default=False, action="store_true",
1682 help="Evacuate primary instances only")
1683
1684 SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
1685 default=False, action="store_true",
1686 help="Evacuate secondary instances only"
1687 " (applies only to internally mirrored"
1688 " disk templates, e.g. %s)" %
1689 utils.CommaJoin(constants.DTS_INT_MIRROR))
1690
1691 STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
1692 action="store_true", default=False,
1693 help="Pause instance at startup")
1694
1695 TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
1696 help="Destination node group (name or uuid)",
1697 default=None, action="append",
1698 completion_suggest=OPT_COMPL_ONE_NODEGROUP)
1699
1700 IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
1701 action="append", dest="ignore_errors",
1702 choices=list(constants.CV_ALL_ECODES_STRINGS),
1703 help="Error code to be ignored")
1704
1705 DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
1706 action="append",
1707 help=("Specify disk state information in the"
1708 " format"
1709 " storage_type/identifier:option=value,...;"
1710 " note this is unused for now"),
1711 type="identkeyval")
1712
1713 HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
1714 action="append",
1715 help=("Specify hypervisor state information in the"
1716 " format hypervisor:option=value,...;"
1717 " note this is unused for now"),
1718 type="identkeyval")
1719
1720 IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
1721 action="store_true", default=False,
1722 help="Ignore instance policy violations")
1723
1724 RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
1725 help="Sets the instance's runtime memory,"
1726 " ballooning it up or down to the new value",
1727 default=None, type="unit", metavar="<size>")
1728
1729 ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
1730 action="store_true", default=False,
1731 help="Marks the grow as absolute instead of the"
1732 " (default) relative mode")
1733
1734 NETWORK_OPT = cli_option("--network",
1735 action="store", default=None, dest="network",
1736 help="IP network in CIDR notation")
1737
1738 GATEWAY_OPT = cli_option("--gateway",
1739 action="store", default=None, dest="gateway",
1740 help="IP address of the router (gateway)")
1741
1742 ADD_RESERVED_IPS_OPT = cli_option("--add-reserved-ips",
1743 action="store", default=None,
1744 dest="add_reserved_ips",
1745 help="Comma-separated list of"
1746 " reserved IPs to add")
1747
1748 REMOVE_RESERVED_IPS_OPT = cli_option("--remove-reserved-ips",
1749 action="store", default=None,
1750 dest="remove_reserved_ips",
1751 help="Comma-delimited list of"
1752 " reserved IPs to remove")
1753
1754 NETWORK6_OPT = cli_option("--network6",
1755 action="store", default=None, dest="network6",
1756 help="IP network in CIDR notation")
1757
1758 GATEWAY6_OPT = cli_option("--gateway6",
1759 action="store", default=None, dest="gateway6",
1760 help="IP6 address of the router (gateway)")
1761
1762 NOCONFLICTSCHECK_OPT = cli_option("--no-conflicts-check",
1763 dest="conflicts_check",
1764 default=True,
1765 action="store_false",
1766 help="Don't check for conflicting IPs")
1767
1768 INCLUDEDEFAULTS_OPT = cli_option("--include-defaults", dest="include_defaults",
1769 default=False, action="store_true",
1770 help="Include default values")
1771
1772 HOTPLUG_OPT = cli_option("--hotplug", dest="hotplug",
1773 action="store_true", default=False,
1774 help="Hotplug supported devices (NICs and Disks)")
1775
1776 HOTPLUG_IF_POSSIBLE_OPT = cli_option("--hotplug-if-possible",
1777 dest="hotplug_if_possible",
1778 action="store_true", default=False,
1779 help="Hotplug devices in case"
1780 " hotplug is supported")
1781
1782 INSTALL_IMAGE_OPT = \
1783 cli_option("--install-image",
1784 dest="install_image",
1785 action="store",
1786 type="string",
1787 default=None,
1788 help="The OS image to use for running the OS scripts safely")
1789
1790 INSTANCE_COMMUNICATION_OPT = \
1791 cli_option("-c", "--communication",
1792 dest="instance_communication",
1793 help=constants.INSTANCE_COMMUNICATION_DOC,
1794 type="bool")
1795
1796 INSTANCE_COMMUNICATION_NETWORK_OPT = \
1797 cli_option("--instance-communication-network",
1798 dest="instance_communication_network",
1799 type="string",
1800 help="Set the network name for instance communication")
1801
1802 ZEROING_IMAGE_OPT = \
1803 cli_option("--zeroing-image",
1804 dest="zeroing_image", action="store", default=None,
1805 help="The OS image to use to zero instance disks")
1806
1807 ZERO_FREE_SPACE_OPT = \
1808 cli_option("--zero-free-space",
1809 dest="zero_free_space", action="store_true", default=False,
1810 help="Whether to zero the free space on the disks of the "
1811 "instance prior to the export")
1812
1813 HELPER_STARTUP_TIMEOUT_OPT = \
1814 cli_option("--helper-startup-timeout",
1815 dest="helper_startup_timeout", action="store", type="int",
1816 help="Startup timeout for the helper VM")
1817
1818 HELPER_SHUTDOWN_TIMEOUT_OPT = \
1819 cli_option("--helper-shutdown-timeout",
1820 dest="helper_shutdown_timeout", action="store", type="int",
1821 help="Shutdown timeout for the helper VM")
1822
1823 ZEROING_TIMEOUT_FIXED_OPT = \
1824 cli_option("--zeroing-timeout-fixed",
1825 dest="zeroing_timeout_fixed", action="store", type="int",
1826 help="The fixed amount of time to wait before assuming that the "
1827 "zeroing failed")
1828
1829 ZEROING_TIMEOUT_PER_MIB_OPT = \
1830 cli_option("--zeroing-timeout-per-mib",
1831 dest="zeroing_timeout_per_mib", action="store", type="float",
1832 help="The amount of time to wait per MiB of data to zero, in "
1833 "addition to the fixed timeout")
1834
1835 #: Options provided by all commands
1836 COMMON_OPTS = [DEBUG_OPT, REASON_OPT]
1837
1838 # options related to asynchronous job handling
1839
1840 SUBMIT_OPTS = [
1841 SUBMIT_OPT,
1842 PRINT_JOBID_OPT,
1843 ]
1844
1845 # common options for creating instances; the "add" and "import" commands then
1846 # add their own specific ones (see the illustrative sketch after the list below).
1847 COMMON_CREATE_OPTS = [
1848 BACKEND_OPT,
1849 DISK_OPT,
1850 DISK_TEMPLATE_OPT,
1851 FILESTORE_DIR_OPT,
1852 FILESTORE_DRIVER_OPT,
1853 HYPERVISOR_OPT,
1854 IALLOCATOR_OPT,
1855 NET_OPT,
1856 NODE_PLACEMENT_OPT,
1857 NOIPCHECK_OPT,
1858 NOCONFLICTSCHECK_OPT,
1859 NONAMECHECK_OPT,
1860 NONICS_OPT,
1861 NWSYNC_OPT,
1862 OSPARAMS_OPT,
1863 OSPARAMS_PRIVATE_OPT,
1864 OSPARAMS_SECRET_OPT,
1865 OS_SIZE_OPT,
1866 OPPORTUNISTIC_OPT,
1867 SUBMIT_OPT,
1868 PRINT_JOBID_OPT,
1869 TAG_ADD_OPT,
1870 DRY_RUN_OPT,
1871 PRIORITY_OPT,
1872 ]
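
# Illustrative sketch (not part of the module): the gnt-* command tables
# usually extend this shared list instead of repeating every option.
# AddInstance and EXTRA_ADD_OPTS are hypothetical names, ArgInstance is the
# argument definition from earlier in this file, and the 5-tuple layout is the
# one unpacked by _ParseArgs further down:
#
#   commands = {
#     "add": (AddInstance, [ArgInstance(min=1, max=1)],
#             COMMON_CREATE_OPTS + EXTRA_ADD_OPTS,
#             "[opts...] <instance_name>", "Creates and adds a new instance"),
#   }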
1873
1874 # common instance policy options
1875 INSTANCE_POLICY_OPTS = [
1876 IPOLICY_BOUNDS_SPECS_OPT,
1877 IPOLICY_DISK_TEMPLATES,
1878 IPOLICY_VCPU_RATIO,
1879 IPOLICY_SPINDLE_RATIO,
1880 ]
1881
1882 # instance policy split specs options
1883 SPLIT_ISPECS_OPTS = [
1884 SPECS_CPU_COUNT_OPT,
1885 SPECS_DISK_COUNT_OPT,
1886 SPECS_DISK_SIZE_OPT,
1887 SPECS_MEM_SIZE_OPT,
1888 SPECS_NIC_COUNT_OPT,
1889 ]
1890
1891
1892 class _ShowUsage(Exception):
1893 """Exception class for L{_ParseArgs}.
1894
1895 """
1896 def __init__(self, exit_error):
1897 """Initializes instances of this class.
1898
1899 @type exit_error: bool
1900 @param exit_error: Whether to report failure on exit
1901
1902 """
1903 Exception.__init__(self)
1904 self.exit_error = exit_error
1905
1906
1907 class _ShowVersion(Exception):
1908 """Exception class for L{_ParseArgs}.
1909
1910 """
1911
1912
1913 def _ParseArgs(binary, argv, commands, aliases, env_override):
1914 """Parser for the command line arguments.
1915
1916 This function parses the arguments and returns the function which
1917 must be executed together with its (modified) arguments.
1918
1919 @param binary: Script name
1920 @param argv: Command line arguments
1921 @param commands: Dictionary containing command definitions
1922 @param aliases: dictionary with command aliases {"alias": "target", ...}
1923 @param env_override: list of env variables allowed for default args
1924 @raise _ShowUsage: If usage description should be shown
1925 @raise _ShowVersion: If version should be shown
1926
1927 """
1928 assert not (env_override - set(commands))
1929 assert not (set(aliases.keys()) & set(commands.keys()))
1930
1931 if len(argv) > 1:
1932 cmd = argv[1]
1933 else:
1934 # No option or command given
1935 raise _ShowUsage(exit_error=True)
1936
1937 if cmd == "--version":
1938 raise _ShowVersion()
1939 elif cmd == "--help":
1940 raise _ShowUsage(exit_error=False)
1941 elif not (cmd in commands or cmd in aliases):
1942 raise _ShowUsage(exit_error=True)
1943
1944 # get command, unalias it, and look it up in commands
1945 if cmd in aliases:
1946 if aliases[cmd] not in commands:
1947 raise errors.ProgrammerError("Alias '%s' maps to non-existing"
1948 " command '%s'" % (cmd, aliases[cmd]))
1949
1950 cmd = aliases[cmd]
1951
1952 if cmd in env_override:
1953 args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
1954 env_args = os.environ.get(args_env_name)
1955 if env_args:
1956 argv = utils.InsertAtPos(argv, 2, shlex.split(env_args))
1957
1958 func, args_def, parser_opts, usage, description = commands[cmd]
1959 parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
1960 description=description,
1961 formatter=TitledHelpFormatter(),
1962 usage="%%prog %s %s" % (cmd, usage))
1963 parser.disable_interspersed_args()
1964 options, args = parser.parse_args(args=argv[2:])
1965
1966 if not _CheckArguments(cmd, args_def, args):
1967 return None, None, None
1968
1969 return func, options, args
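
# Illustrative sketch (not part of the module): with env_override containing
# "list" and a binary named "gnt-instance", default arguments for the "list"
# command can be supplied through the environment variable GNT_INSTANCE_LIST
# (binary name upper-cased, "-" replaced by "_", command name appended), e.g.
#
#   GNT_INSTANCE_LIST="-o name,status"
#
# The value is split with shlex and inserted right after the command name,
# before any arguments given on the command line.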
1970
1971
1972 def _FormatUsage(binary, commands):
1973 """Generates a nice description of all commands.
1974
1975 @param binary: Script name
1976 @param commands: Dictionary containing command definitions
1977
1978 """
1979 # compute the max line length for cmd + usage
1980 mlen = min(60, max(map(len, commands)))
1981
1982 yield "Usage: %s {command} [options...] [argument...]" % binary
1983 yield "%s <command> --help to see details, or man %s" % (binary, binary)
1984 yield ""
1985 yield "Commands:"
1986
1987 # and format a nice command list
1988 for (cmd, (_, _, _, _, help_text)) in sorted(commands.items()):
1989 help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
1990 yield " %-*s - %s" % (mlen, cmd, help_lines.pop(0))
1991 for line in help_lines:
1992 yield " %-*s %s" % (mlen, "", line)
1993
1994 yield ""
1995
1996
1997 def _CheckArguments(cmd, args_def, args):
1998 """Verifies the arguments using the argument definition.
1999
2000 Algorithm:
2001
2002 1. Abort with error if values specified by user but none expected.
2003
2004 1. For each argument in definition
2005
2006 1. Keep running count of minimum number of values (min_count)
2007 1. Keep running count of maximum number of values (max_count)
2008 1. If it has an unlimited number of values
2009
2010 1. Abort with error if it's not the last argument in the definition
2011
2012 1. If last argument has limited number of values
2013
2014 1. Abort with error if number of values doesn't match or is too large
2015
2016 1. Abort with error if user didn't pass enough values (min_count)
2017
2018 """
2019 if args and not args_def:
2020 ToStderr("Error: Command %s expects no arguments", cmd)
2021 return False
2022
2023 min_count = None
2024 max_count = None
2025 check_max = None
2026
2027 last_idx = len(args_def) - 1
2028
2029 for idx, arg in enumerate(args_def):
2030 if min_count is None:
2031 min_count = arg.min
2032 elif arg.min is not None:
2033 min_count += arg.min
2034
2035 if max_count is None:
2036 max_count = arg.max
2037 elif arg.max is not None:
2038 max_count += arg.max
2039
2040 if idx == last_idx:
2041 check_max = (arg.max is not None)
2042
2043 elif arg.max is None:
2044 raise errors.ProgrammerError("Only the last argument can have max=None")
2045
2046 if check_max:
2047 # Command with exact number of arguments
2048 if (min_count is not None and max_count is not None and
2049 min_count == max_count and len(args) != min_count):
2050 ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
2051 return False
2052
2053 # Command with limited number of arguments
2054 if max_count is not None and len(args) > max_count:
2055 ToStderr("Error: Command %s expects only %d argument(s)",
2056 cmd, max_count)
2057 return False
2058
2059 # Command with some required arguments
2060 if min_count is not None and len(args) < min_count:
2061 ToStderr("Error: Command %s expects at least %d argument(s)",
2062 cmd, min_count)
2063 return False
2064
2065 return True
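
# Illustrative sketch (not part of the module), assuming the ArgInstance
# argument definition from earlier in this file: with an argument definition
# of [ArgInstance(min=1, max=1)] exactly one value is accepted, so
#
#   _CheckArguments("startup", [ArgInstance(min=1, max=1)], [])
#   # -> False (an error is printed to stderr)
#   _CheckArguments("startup", [ArgInstance(min=1, max=1)], ["inst1"])
#   # -> True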
2066
2067
2068 def SplitNodeOption(value):
2069 """Splits the value of a --node option.
2070
2071 """
2072 if value and ":" in value:
2073 return value.split(":", 1)
2074 else:
2075 return (value, None)
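
# Illustrative sketch (not part of the module), with hypothetical node names:
#
#   pnode, snode = SplitNodeOption("node1.example.com:node2.example.com")
#   # -> pnode == "node1.example.com", snode == "node2.example.com"
#   pnode, snode = SplitNodeOption("node1.example.com")
#   # -> pnode == "node1.example.com", snode is None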
2076
2077
2078 def CalculateOSNames(os_name, os_variants):
2079 """Calculates all the names an OS can be called, according to its variants.
2080
2081 @type os_name: string
2082 @param os_name: base name of the os
2083 @type os_variants: list or None
2084 @param os_variants: list of supported variants
2085 @rtype: list
2086 @return: list of valid names
2087
2088 """
2089 if os_variants:
2090 return ["%s+%s" % (os_name, v) for v in os_variants]
2091 else:
2092 return [os_name]
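
# Illustrative sketch (not part of the module), with a hypothetical OS name:
#
#   CalculateOSNames("debootstrap", ["buster", "bullseye"])
#   # -> ["debootstrap+buster", "debootstrap+bullseye"]
#   CalculateOSNames("debootstrap", None)
#   # -> ["debootstrap"]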
2093
2094
2095 def ParseFields(selected, default):
2096 """Parses the values of "--field"-like options.
2097
2098 @type selected: string or None
2099 @param selected: User-selected options
2100 @type default: list
2101 @param default: Default fields
2102
2103 """
2104 if selected is None:
2105 return default
2106
2107 if selected.startswith("+"):
2108 return default + selected[1:].split(",")
2109
2110 return selected.split(",")
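
# Illustrative sketch (not part of the module), with arbitrary field names:
#
#   ParseFields(None, ["name", "status"])         # -> ["name", "status"]
#   ParseFields("+oper_ram", ["name", "status"])  # -> ["name", "status", "oper_ram"]
#   ParseFields("name,os", ["name", "status"])    # -> ["name", "os"]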
2111
2112
2113 UsesRPC = rpc.RunWithRPC
2114
2115
2116 def AskUser(text, choices=None):
2117 """Ask the user a question.
2118
2119 @param text: the question to ask
2120
2121   @param choices: list of tuples (input_char, return_value,
2122     description); if not given, it will default to: [('y', True,
2123     'Perform the operation'), ('n', False, 'Do not do the operation')];
2124 note that the '?' char is reserved for help
2125
2126 @return: one of the return values from the choices list; if input is
2127     not possible (i.e. not running with a tty), we return the last
2128 entry from the list
2129
2130 """
2131 if choices is None:
2132 choices = [("y", True, "Perform the operation"),
2133 ("n", False, "Do not perform the operation")]
2134 if not choices or not isinstance(choices, list):
2135 raise errors.ProgrammerError("Invalid choices argument to AskUser")
2136 for entry in choices:
2137 if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
2138 raise errors.ProgrammerError("Invalid choices element to AskUser")
2139
2140 answer = choices[-1][1]
2141 new_text = []
2142 for line in text.splitlines():
2143 new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
2144 text = "\n".join(new_text)
2145 try:
2146 f = file("/dev/tty", "a+")
2147 except IOError:
2148 return answer
2149 try:
2150 chars = [entry[0] for entry in choices]
2151 chars[-1] = "[%s]" % chars[-1]
2152 chars.append("?")
2153 maps = dict([(entry[0], entry[1]) for entry in choices])
2154 while True:
2155 f.write(text)
2156 f.write("\n")
2157 f.write("/".join(chars))
2158 f.write(": ")
2159 line = f.readline(2).strip().lower()
2160 if line in maps:
2161 answer = maps[line]
2162 break
2163 elif line == "?":
2164 for entry in choices:
2165 f.write(" %s - %s\n" % (entry[0], entry[2]))
2166 f.write("\n")
2167 continue
2168 finally:
2169 f.close()
2170 return answer
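
# Illustrative sketch (not part of the module), with a hypothetical prompt:
#
#   if not AskUser("Destroy instance 'inst1.example.com'?"):
#     return constants.EXIT_FAILURE
#
# With the default choices this returns True only when the user types "y";
# without a controlling tty the last (default) choice, False, is returned.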
2171
2172
2173 class JobSubmittedException(Exception):
2174 """Job was submitted, client should exit.
2175
2176 This exception has one argument, the ID of the job that was
2177 submitted. The handler should print this ID.
2178
2179 This is not an error, just a structured way to exit from clients.
2180
2181 """
2182
2183
2184 def SendJob(ops, cl=None):
2185 """Function to submit an opcode without waiting for the results.
2186
2187 @type ops: list
2188 @param ops: list of opcodes
2189 @type cl: luxi.Client
2190 @param cl: the luxi client to use for communicating with the master;
2191 if None, a new client will be created
2192
2193 """
2194 if cl is None:
2195 cl = GetClient()
2196
2197 job_id = cl.SubmitJob(ops)
2198
2199 return job_id
2200
2201
2202 def GenericPollJob(job_id, cbs, report_cbs):
2203 """Generic job-polling function.
2204
2205 @type job_id: number
2206 @param job_id: Job ID
2207 @type cbs: Instance of L{JobPollCbBase}
2208 @param cbs: Data callbacks
2209 @type report_cbs: Instance of L{JobPollReportCbBase}
2210 @param report_cbs: Reporting callbacks
2211
2212 """
2213 prev_job_info = None
2214 prev_logmsg_serial = None
2215
2216 status = None
2217
2218 while True:
2219 result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
2220 prev_logmsg_serial)
2221 if not result:
2222 # job not found, go away!
2223 raise errors.JobLost("Job with id %s lost" % job_id)
2224
2225 if result == constants.JOB_NOTCHANGED:
2226 report_cbs.ReportNotChanged(job_id, status)
2227
2228 # Wait again
2229 continue
2230
2231 # Split result, a tuple of (field values, log entries)
2232 (job_info, log_entries) = result
2233 (status, ) = job_info
2234
2235 if log_entries:
2236 for log_entry in log_entries:
2237 (serial, timestamp, log_type, message) = log_entry
2238 report_cbs.ReportLogMessage(job_id, serial, timestamp,
2239 log_type, message)
2240 prev_logmsg_serial = max(prev_logmsg_serial, serial)
2241
2242 # TODO: Handle canceled and archived jobs
2243 elif status in (constants.JOB_STATUS_SUCCESS,
2244 constants.JOB_STATUS_ERROR,
2245 constants.JOB_STATUS_CANCELING,
2246 constants.JOB_STATUS_CANCELED):
2247 break
2248
2249 prev_job_info = job_info
2250
2251 jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
2252 if not jobs:
2253 raise errors.JobLost("Job with id %s lost" % job_id)
2254
2255 status, opstatus, result = jobs[0]
2256
2257 if status == constants.JOB_STATUS_SUCCESS:
2258 return result
2259
2260 if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
2261 raise errors.OpExecError("Job was canceled")
2262
2263 has_ok = False
2264 for idx, (status, msg) in enumerate(zip(opstatus, result)):
2265 if status == constants.OP_STATUS_SUCCESS:
2266 has_ok = True
2267 elif status == constants.OP_STATUS_ERROR:
2268 errors.MaybeRaise(msg)
2269
2270 if has_ok:
2271 raise errors.OpExecError("partial failure (opcode %d): %s" %
2272 (idx, msg))
2273
2274 raise errors.OpExecError(str(msg))
2275
2276 # default failure mode
2277 raise errors.OpExecError(result)
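
# Illustrative sketch (not part of the module): a callback object only has to
# honour the contract used above: WaitForJobChangeOnce returns
# constants.JOB_NOTCHANGED while nothing happened, a false value once the job
# is gone, or a (field_values, log_entries) tuple, where each log entry is a
# (serial, timestamp, log_type, message) tuple. The class below is a
# hypothetical test double, not something shipped with Ganeti:
#
#   class _FinishedJobPollCb(JobPollCbBase):
#     def WaitForJobChangeOnce(self, job_id, fields, prev_info, prev_serial):
#       return ([constants.JOB_STATUS_SUCCESS], [])
#     def QueryJobs(self, job_ids, fields):
#       return [[constants.JOB_STATUS_SUCCESS, [constants.OP_STATUS_SUCCESS],
#                ["result"]]]
#
#   GenericPollJob(42, _FinishedJobPollCb(), StdioJobPollReportCb())
#   # -> ["result"]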
2278
2279
2280 class JobPollCbBase(object):
2281 """Base class for L{GenericPollJob} callbacks.
2282
2283 """
2284 def __init__(self):
2285 """Initializes this class.
2286
2287 """
2288
2289 def WaitForJobChangeOnce(self, job_id, fields,
2290 prev_job_info, prev_log_serial):
2291 """Waits for changes on a job.
2292
2293 """
2294 raise NotImplementedError()
2295
2296 def QueryJobs(self, job_ids, fields):
2297 """Returns the selected fields for the selected job IDs.
2298
2299 @type job_ids: list of numbers
2300 @param job_ids: Job IDs
2301 @type fields: list of strings
2302 @param fields: Fields
2303
2304 """
2305 raise NotImplementedError()
2306
2307
2308 class JobPollReportCbBase(object):
2309 """Base class for L{GenericPollJob} reporting callbacks.
2310
2311 """
2312 def __init__(self):
2313 """Initializes this class.
2314
2315 """
2316
2317 def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2318 """Handles a log message.
2319
2320 """
2321 raise NotImplementedError()
2322
2323 def ReportNotChanged(self, job_id, status):
2324     """Called if a job hasn't changed in a while.
2325
2326 @type job_id: number
2327 @param job_id: Job ID
2328 @type status: string or None
2329 @param status: Job status if available
2330
2331 """
2332 raise NotImplementedError()
2333
2334
2335 class _LuxiJobPollCb(JobPollCbBase):
2336 def __init__(self, cl):
2337 """Initializes this class.
2338
2339 """
2340 JobPollCbBase.__init__(self)
2341 self.cl = cl
2342
2343 def WaitForJobChangeOnce(self, job_id, fields,
2344 prev_job_info, prev_log_serial):
2345 """Waits for changes on a job.
2346
2347 """
2348 return self.cl.WaitForJobChangeOnce(job_id, fields,
2349 prev_job_info, prev_log_serial)
2350
2351 def QueryJobs(self, job_ids, fields):
2352 """Returns the selected fields for the selected job IDs.
2353
2354 """
2355 return self.cl.QueryJobs(job_ids, fields)
2356
2357
2358 class FeedbackFnJobPollReportCb(JobPollReportCbBase):
2359 def __init__(self, feedback_fn):
2360 """Initializes this class.
2361
2362 """
2363 JobPollReportCbBase.__init__(self)
2364
2365 self.feedback_fn = feedback_fn
2366
2367 assert callable(feedback_fn)
2368
2369 def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2370 """Handles a log message.
2371
2372 """
2373 self.feedback_fn((timestamp, log_type, log_msg))
2374
2375 def ReportNotChanged(self, job_id, status):
2376 """Called if a job hasn't changed in a while.
2377
2378 """
2379 # Ignore
2380
2381
2382 class StdioJobPollReportCb(JobPollReportCbBase):
2383 def __init__(self):
2384 """Initializes this class.
2385
2386 """
2387 JobPollReportCbBase.__init__(self)
2388
2389 self.notified_queued = False
2390 self.notified_waitlock = False
2391
2392 def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2393 """Handles a log message.
2394
2395 """
2396 ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
2397 FormatLogMessage(log_type, log_msg))
2398
2399 def ReportNotChanged(self, job_id, status):
2400 """Called if a job hasn't changed in a while.
2401
2402 """
2403 if status is None:
2404 return
2405
2406 if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
2407 ToStderr("Job %s is waiting in queue", job_id)
2408 self.notified_queued = True
2409
2410 elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
2411 ToStderr("Job %s is trying to acquire all necessary locks", job_id)
2412 self.notified_waitlock = True
2413
2414
2415 def FormatLogMessage(log_type, log_msg):
2416 """Formats a job message according to its type.
2417
2418 """
2419 if log_type != constants.ELOG_MESSAGE:
2420 log_msg = str(log_msg)
2421
2422 return utils.SafeEncode(log_msg)
2423
2424
2425 def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
2426 """Function to poll for the result of a job.
2427
2428   @type job_id: job identifier
2429 @param job_id: the job to poll for results
2430 @type cl: luxi.Client
2431 @param cl: the luxi client to use for communicating with the master;
2432 if None, a new client will be created
2433
2434 """
2435 if cl is None:
2436 cl = GetClient()
2437
2438 if reporter is None:
2439 if feedback_fn:
2440 reporter = FeedbackFnJobPollReportCb(feedback_fn)
2441 else:
2442 reporter = StdioJobPollReportCb()
2443 elif feedback_fn:
2444 raise errors.ProgrammerError("Can't specify reporter and feedback function")
2445
2446 return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
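
# Illustrative sketch (not part of the module): the usual fire-and-wait pattern
# combines SendJob and PollJob ("op" stands for any opcodes.OpCode instance):
#
#   cl = GetClient()
#   job_id = SendJob([op], cl=cl)
#   results = PollJob(job_id, cl=cl)   # one entry per opcode in the job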
2447
2448
2449 def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
2450 """Legacy function to submit an opcode.
2451
2452 This is just a simple wrapper over the construction of the processor
2453 instance. It should be extended to better handle feedback and
2454 interaction functions.
2455
2456 """
2457 if cl is None:
2458 cl = GetClient()
2459
2460 SetGenericOpcodeOpts([op], opts)
2461
2462 job_id = SendJob([op], cl=cl)
2463 if hasattr(opts, "print_jobid") and opts.print_jobid:
2464 ToStdout("%d" % job_id)
2465
2466 op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
2467 reporter=reporter)
2468
2469 return op_results[0]
2470
2471
2472 def SubmitOpCodeToDrainedQueue(op):
2473 """Forcefully insert a job in the queue, even if it is drained.
2474
2475 """
2476 cl = GetClient()
2477 job_id = cl.SubmitJobToDrainedQueue([op])
2478 op_results = PollJob(job_id, cl=cl)
2479 return op_results[0]
2480
2481
2482 def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
2483 """Wrapper around SubmitOpCode or SendJob.
2484
2485 This function will decide, based on the 'opts' parameter, whether to
2486 submit and wait for the result of the opcode (and return it), or
2487 whether to just send the job and print its identifier. It is used in
2488 order to simplify the implementation of the '--submit' option.
2489
2490   It will also process the opcodes if we're sending them via SendJob
2491 (otherwise SubmitOpCode does it).
2492
2493 """
2494 if opts and opts.submit_only:
2495 job = [op]
2496 SetGenericOpcodeOpts(job, opts)
2497 job_id = SendJob(job, cl=cl)
2498 if opts.print_jobid:
2499 ToStdout("%d" % job_id)
2500 raise JobSubmittedException(job_id)
2501 else:
2502 return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
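
# Illustrative sketch (not part of the module): a command implementation simply
# hands its opcode and parsed options over (MyStartup is hypothetical):
#
#   def MyStartup(opts, args):
#     op = opcodes.OpInstanceStartup(instance_name=args[0])
#     SubmitOrSend(op, opts)
#     return 0
#
# When --submit is given, the JobSubmittedException raised above is turned into
# a plain "JobID: <id>" message by GenericMain via FormatError.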
2503
2504
2505 def _InitReasonTrail(op, opts):
2506 """Builds the first part of the reason trail
2507
2508 Builds the initial part of the reason trail, adding the user provided reason
2509 (if it exists) and the name of the command starting the operation.
2510
2511 @param op: the opcode the reason trail will be added to
2512 @param opts: the command line options selected by the user
2513
2514 """
2515 assert len(sys.argv) >= 2
2516 trail = []
2517
2518 if opts.reason:
2519 trail.append((constants.OPCODE_REASON_SRC_USER,
2520 opts.reason,
2521 utils.EpochNano()))
2522
2523 binary = os.path.basename(sys.argv[0])
2524 source = "%s:%s" % (constants.OPCODE_REASON_SRC_CLIENT, binary)
2525 command = sys.argv[1]
2526 trail.append((source, command, utils.EpochNano()))
2527 op.reason = trail
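
# Illustrative sketch (not part of the module): after the call above, op.reason
# is a list of (source, reason, timestamp) tuples, the user-supplied entry (if
# any) first and the client entry last, roughly:
#
#   [(constants.OPCODE_REASON_SRC_USER, "scheduled maintenance", 1234567890...),
#    ("<OPCODE_REASON_SRC_CLIENT>:gnt-instance", "start", 1234567890...)]
#
# Timestamps are nanoseconds since the epoch; the exact values depend on the
# binary and command actually invoked.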
2528
2529
2530 def SetGenericOpcodeOpts(opcode_list, options):
2531 """Processor for generic options.
2532
2533 This function updates the given opcodes based on generic command
2534 line options (like debug, dry-run, etc.).
2535
2536 @param opcode_list: list of opcodes
2537 @param options: command line options or None
2538 @return: None (in-place modification)
2539
2540 """
2541 if not options:
2542 return
2543 for op in opcode_list:
2544 op.debug_level = options.debug
2545 if hasattr(options, "dry_run"):
2546 op.dry_run = options.dry_run
2547 if getattr(options, "priority", None) is not None:
2548 op.priority = options.priority
2549 _InitReasonTrail(op, options)
2550
2551
2552 def FormatError(err):
2553 """Return a formatted error message for a given error.
2554
2555 This function takes an exception instance and returns a tuple
2556 consisting of two values: first, the recommended exit code, and
2557 second, a string describing the error message (not
2558 newline-terminated).
2559
2560 """
2561 retcode = 1
2562 obuf = StringIO()
2563 msg = str(err)
2564 if isinstance(err, errors.ConfigurationError):
2565 txt = "Corrupt configuration file: %s" % msg
2566 logging.error(txt)
2567 obuf.write(txt + "\n")
2568 obuf.write("Aborting.")
2569 retcode = 2
2570 elif isinstance(err, errors.HooksAbort):
2571 obuf.write("Failure: hooks execution failed:\n")
2572 for node, script, out in err.args[0]:
2573 if out:
2574 obuf.write(" node: %s, script: %s, output: %s\n" %
2575 (node, script, out))
2576 else:
2577 obuf.write(" node: %s, script: %s (no output)\n" %
2578 (node, script))
2579 elif isinstance(err, errors.HooksFailure):
2580 obuf.write("Failure: hooks general failure: %s" % msg)
2581 elif isinstance(err, errors.ResolverError):
2582 this_host = netutils.Hostname.GetSysName()
2583 if err.args[0] == this_host:
2584 msg = "Failure: can't resolve my own hostname ('%s')"
2585 else:
2586 msg = "Failure: can't resolve hostname '%s'"
2587 obuf.write(msg % err.args[0])
2588 elif isinstance(err, errors.OpPrereqError):
2589 if len(err.args) == 2:
2590 obuf.write("Failure: prerequisites not met for this"
2591 " operation:\nerror type: %s, error details:\n%s" %
2592 (err.args[1], err.args[0]))
2593 else:
2594 obuf.write("Failure: prerequisites not met for this"
2595 " operation:\n%s" % msg)
2596 elif isinstance(err, errors.OpExecError):
2597 obuf.write("Failure: command execution error:\n%s" % msg)
2598 elif isinstance(err, errors.TagError):
2599 obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
2600 elif isinstance(err, errors.JobQueueDrainError):
2601 obuf.write("Failure: the job queue is marked for drain and doesn't"
2602 " accept new requests\n")
2603 elif isinstance(err, errors.JobQueueFull):
2604 obuf.write("Failure: the job queue is full and doesn't accept new"
2605 " job submissions until old jobs are archived\n")
2606 elif isinstance(err, errors.TypeEnforcementError):
2607 obuf.write("Parameter Error: %s" % msg)
2608 elif isinstance(err, errors.ParameterError):
2609 obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
2610 elif isinstance(err, rpcerr.NoMasterError):
2611 if err.args[0] == pathutils.MASTER_SOCKET:
2612 daemon = "the master daemon"
2613 elif err.args[0] == pathutils.QUERY_SOCKET:
2614 daemon = "the config daemon"
2615 else:
2616 daemon = "socket '%s'" % str(err.args[0])
2617 obuf.write("Cannot communicate with %s.\nIs the process running"
2618 " and listening for connections?" % daemon)
2619 elif isinstance(err, rpcerr.TimeoutError):
2620 obuf.write("Timeout while talking to the master daemon. Jobs might have"
2621 " been submitted and will continue to run even if the call"
2622 " timed out. Useful commands in this situation are \"gnt-job"
2623 " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
2624 obuf.write(msg)
2625 elif isinstance(err, rpcerr.PermissionError):
2626 obuf.write("It seems you don't have permissions to connect to the"
2627 " master daemon.\nPlease retry as a different user.")
2628 elif isinstance(err, rpcerr.ProtocolError):
2629 obuf.write("Unhandled protocol error while talking to the master daemon:\n"
2630 "%s" % msg)
2631 elif isinstance(err, errors.JobLost):
2632 obuf.write("Error checking job status: %s" % msg)
2633 elif isinstance(err, errors.QueryFilterParseError):
2634 obuf.write("Error while parsing query filter: %s\n" % err.args[0])
2635 obuf.write("\n".join(err.GetDetails()))
2636 elif isinstance(err, errors.GenericError):
2637 obuf.write("Unhandled Ganeti error: %s" % msg)
2638 elif isinstance(err, JobSubmittedException):
2639 obuf.write("JobID: %s\n" % err.args[0])
2640 retcode = 0
2641 else:
2642 obuf.write("Unhandled exception: %s" % msg)
2643 return retcode, obuf.getvalue().rstrip("\n")
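
# Illustrative sketch (not part of the module):
#
#   retcode, msg = FormatError(errors.OpPrereqError("no such instance",
#                                                   errors.ECODE_INVAL))
#   # -> retcode == 1 and msg starts with
#   #    "Failure: prerequisites not met for this operation:"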
2644
2645
2646 def GenericMain(commands, override=None, aliases=None,
2647 env_override=frozenset()):
2648 """Generic main function for all the gnt-* commands.
2649
2650 @param commands: a dictionary with a special structure, see the design doc
2651 for command line handling.
2652 @param override: if not None, we expect a dictionary with keys that will
2653 override command line options; this can be used to pass
2654 options from the scripts to generic functions
2655   @param aliases: dictionary with command aliases {'alias': 'target', ...}
2656 @param env_override: list of environment names which are allowed to submit
2657 default args for commands
2658
2659 """
2660 # save the program name and the entire command line for later logging
2661 if sys.argv:
2662 binary = os.path.basename(sys.argv[0])
2663 if not binary:
2664 binary = sys.argv[0]
2665
2666 if len(sys.argv) >= 2:
2667 logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
2668 else:
2669 logname = binary
2670
2671 cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
2672 else:
2673 binary = "<unknown program>"
2674 cmdline = "<unknown>"
2675
2676 if aliases is None:
2677 aliases = {}
2678
2679 try:
2680 (func, options, args) = _ParseArgs(binary, sys.argv, commands, aliases,
2681 env_override)
2682 except _ShowVersion:
2683 ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
2684 constants.RELEASE_VERSION)
2685 return constants.EXIT_SUCCESS
2686 except _ShowUsage, err:
2687 for line in _FormatUsage(binary, commands):
2688 ToStdout(line)
2689
2690 if err.exit_error:
2691 return constants.EXIT_FAILURE
2692 else:
2693 return constants.EXIT_SUCCESS
2694 except errors.ParameterError, err:
2695 result, err_msg = FormatError(err)
2696 ToStderr(err_msg)
2697 return 1
2698
2699 if func is None: # parse error
2700 return 1
2701
2702 if override is not None:
2703 for key, val in override.iteritems():
2704 setattr(options, key, val)
2705
2706 utils.SetupLogging(pathutils.LOG_COMMANDS, logname, debug=options.debug,
2707 stderr_logging=True)
2708
2709 logging.debug("Command line: %s", cmdline)
2710
2711 try:
2712 result = func(options, args)
2713 except (errors.GenericError, rpcerr.ProtocolError,
2714 JobSubmittedException), err:
2715 result, err_msg = FormatError(err)
2716 logging.exception("Error during command processing")
2717 ToStderr(err_msg)
2718 except KeyboardInterrupt:
2719 result = constants.EXIT_FAILURE
2720 ToStderr("Aborted. Note that if the operation created any jobs, they"
2721 " might have been submitted and"
2722 " will continue to run in the background.")
2723 except IOError, err:
2724 if err.errno == errno.EPIPE:
2725 # our terminal went away, we'll exit
2726 sys.exit(constants.EXIT_FAILURE)
2727 else:
2728 raise
2729
2730 return result
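
# Illustrative sketch (not part of the module): a gnt-* style script wires its
# command table into GenericMain. ListThings is hypothetical; ARGS_NONE and
# FIELDS_OPT are assumed to be the argument/option helpers defined earlier in
# this module:
#
#   def ListThings(opts, args):
#     ...
#     return constants.EXIT_SUCCESS
#
#   commands = {
#     "list": (ListThings, ARGS_NONE, [FIELDS_OPT],
#              "", "Lists the things handled by this tool"),
#   }
#
#   if __name__ == "__main__":
#     sys.exit(GenericMain(commands))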
2731
2732
2733 def ParseNicOption(optvalue):
2734 """Parses the value of the --net option(s).
2735
2736 """
2737 try:
2738 nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
2739 except (TypeError, ValueError), err:
2740 raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err),
2741 errors.ECODE_INVAL)
2742
2743 nics = [{}] * nic_max
2744 for nidx, ndict in optvalue:
2745 nidx = int(nidx)
2746
2747 if not isinstance(ndict, dict):
2748 raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
2749 " got %s" % (nidx, ndict), errors.ECODE_INVAL)
2750
2751 utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
2752
2753 nics[nidx] = ndict
2754
2755 return nics
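
# Illustrative sketch (not part of the module): the --net values arrive as
# (index, dict) pairs; indices without an explicit value stay empty dicts
# ("br0" is a hypothetical bridge name):
#
#   ParseNicOption([("0", {"link": "br0"}), ("2", {"mode": "routed"})])
#   # -> [{"link": "br0"}, {}, {"mode": "routed"}]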
2756
2757
2758 def FixHvParams(hvparams):
2759 # In Ganeti 2.8.4 the separator for the usb_devices hvparam was changed from
2760 # comma to space because commas cannot be accepted on the command line
2761 # (they already act as the separator between different hvparams). Still,
2762 # RAPI should be able to accept commas for backwards compatibility.
2763 # Therefore, we convert spaces into commas here, and we keep the old
2764 # parsing logic everywhere else.
2765 try:
2766 new_usb_devices = hvparams[constants.HV_USB_DEVICES].replace(" ", ",")
2767 hvparams[constants.HV_USB_DEVICES] = new_usb_devices
2768 except KeyError:
2769     # No usb_devices, no modification required
2770 pass
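
# Illustrative sketch (not part of the module), with hypothetical USB IDs:
#
#   hvp = {constants.HV_USB_DEVICES: "0451:16a2 0451:16a3"}   # space-separated
#   FixHvParams(hvp)
#   # hvp[constants.HV_USB_DEVICES] == "0451:16a2,0451:16a3"  # comma-separated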
2771
2772
2773 def GenericInstanceCreate(mode, opts, args):
2774 """Add an instance to the cluster via either creation or import.
2775
2776 @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
2777 @param opts: the command line options selected by the user
2778 @type args: list
2779 @param args: should contain only one element, the new instance name
2780 @rtype: int
2781 @return: the desired exit code
2782
2783 """
2784 instance = args[0]
2785
2786 (pnode, snode) = SplitNodeOption(opts.node)
2787
2788 hypervisor = None
2789 hvparams = {}
2790 if opts.hypervisor:
2791 hypervisor, hvparams = opts.hypervisor
2792
2793 if opts.nics:
2794 nics = ParseNicOption(opts.nics)
2795 elif opts.no_nics:
2796 # no nics
2797 nics = []
2798 elif mode == constants.INSTANCE_CREATE:
2799 # default of one nic, all auto
2800 nics = [{}]
2801 else:
2802 # mode == import
2803 nics = []
2804
2805 if opts.disk_template == constants.DT_DISKLESS:
2806 if opts.disks or opts.sd_size is not None:
2807 raise errors.OpPrereqError("Diskless instance but disk"
2808 " information passed", errors.ECODE_INVAL)
2809 disks = []
2810 else:
2811 if (not opts.disks and not opts.sd_size
2812 and mode == constants.INSTANCE_CREATE):
2813 raise errors.OpPrereqError("No disk information specified",
2814 errors.ECODE_INVAL)
2815 if opts.disks and opts.sd_size is not None:
2816 raise errors.OpPrereqError("Please use either the '--disk' or"
2817 " '-s' option", errors.ECODE_INVAL)
2818 if opts.sd_size is not None:
2819 opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]
2820
2821 if opts.disks:
2822 try:
2823 disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
2824 except ValueError, err:
2825 raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err),
2826 errors.ECODE_INVAL)
2827 disks = [{}] * disk_max
2828 else:
2829 disks = []
2830 for didx, ddict in opts.disks:
2831 didx = int(didx)
2832 if not isinstance(ddict, dict):
2833 msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
2834 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
2835 elif constants.IDISK_SIZE in ddict:
2836 if constants.IDISK_ADOPT in ddict:
2837 raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
2838 " (disk %d)" % didx, errors.ECODE_INVAL)
2839 try:
2840 ddict[constants.IDISK_SIZE] = \
2841 utils.ParseUnit(ddict[constants.IDISK_SIZE])
2842 except ValueError, err:
2843 raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
2844 (didx, err), errors.ECODE_INVAL)
2845 elif constants.IDISK_ADOPT in ddict:
2846 if constants.IDISK_SPINDLES in ddict:
2847 raise errors.OpPrereqError("spindles is not a valid option when"
2848 " adopting a disk", errors.ECODE_INVAL)
2849 if mode == constants.INSTANCE_IMPORT:
2850 raise errors.OpPrereqError("Disk adoption not allowed for instance"
2851 " import", errors.ECODE_INVAL)
2852 ddict[constants.IDISK_SIZE] = 0
2853 else:
2854 raise errors.OpPrereqError("Missing size or adoption source for"
2855 " disk %d" % didx, errors.ECODE_INVAL)
2856 if constants.IDISK_SPINDLES in ddict:
2857 ddict[constants.IDISK_SPINDLES] = int(ddict[constants.IDISK_SPINDLES])
2858
2859 disks[didx] = ddict
2860
2861 if opts.tags is not None:
2862 tags = opts.tags.split(",")
2863 else:
2864 tags = []
2865
2866 utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
2867 utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
2868 FixHvParams(hvparams)
2869
2870 osparams_private = opts.osparams_private or serializer.PrivateDict()
2871 osparams_secret = opts.osparams_secret or serializer.PrivateDict()
2872
2873 helper_startup_timeout = opts.helper_startup_timeout
2874 helper_shutdown_timeout = opts.helper_shutdown_timeout
2875
2876 if mode == constants.INSTANCE_CREATE:
2877 start = opts.start
2878 os_type = opts.os
2879 force_variant = opts.force_variant
2880 src_node = None
2881 src_path = None
2882 no_install = opts.no_install
2883 identify_defaults = False
2884 compress = constants.IEC_NONE
2885 if opts.instance_communication is None:
2886 instance_communication = False
2887 else:
2888 instance_communication = opts.instance_communication
2889 elif mode == constants.INSTANCE_IMPORT:
2890 start = False
2891 os_type = None
2892 force_variant = False
2893 src_node = opts.src_node
2894 src_path = opts.src_dir
2895 no_install = None
2896 identify_defaults = opts.identify_defaults
2897 compress = opts.compress
2898 instance_communication = False
2899 else:
2900 raise errors.ProgrammerError("Invalid creation mode %s" % mode)
2901
2902 op = opcodes.OpInstanceCreate(
2903 instance_name=instance,
2904 disks=disks,
2905 disk_template=opts.disk_template,
2906 nics=nics,
2907 conflicts_check=opts.conflicts_check,
2908 pnode=pnode, snode=snode,
2909 ip_check=opts.ip_check,
2910 name_check=opts.name_check,
2911 wait_for_sync=opts.wait_for_sync,
2912 file_storage_dir=opts.file_storage_dir,
2913 file_driver=opts.file_driver,
2914 iallocator=opts.iallocator,
2915 hypervisor=hypervisor,
2916 hvparams=hvparams,
2917 beparams=opts.beparams,
2918 osparams=opts.osparams,
2919 osparams_private=osparams_private,
2920 osparams_secret=osparams_secret,
2921 mode=mode,
2922 opportunistic_locking=opts.opportunistic_locking,
2923 start=start,
2924 os_type=os_type,
2925 force_variant=force_variant,
2926 src_node=src_node,
2927 src_path=src_path,
2928 compress=compress,
2929 tags=tags,
2930 no_install=no_install,
2931 identify_defaults=identify_defaults,
2932 ignore_ipolicy=opts.ignore_ipolicy,
2933 instance_communication=instance_communication,
2934 helper_startup_timeout=helper_startup_timeout,
2935 helper_shutdown_timeout=helper_shutdown_timeout)
2936
2937 SubmitOrSend(op, opts)
2938 return 0
2939
2940
2941 class _RunWhileDaemonsStoppedHelper(object):
2942 """Helper class for L{RunWhileDaemonsStopped} to simplify state management
2943
2944 """
2945 def __init__(self, feedback_fn, cluster_name, master_node,
2946 online_nodes, ssh_ports, exclude_daemons, debug,
2947 verbose):
2948 """Initializes this class.
2949
2950 @type feedback_fn: callable
2951 @param feedback_fn: Feedback function
2952 @type cluster_name: string
2953 @param cluster_name: Cluster name
2954 @type master_node: string
2955     @param master_node: Master node name
2956 @type online_nodes: list
2957 @param online_nodes: List of names of online nodes
2958 @type ssh_ports: list
2959 @param ssh_ports: List of SSH ports of online nodes
2960 @type exclude_daemons: list of string
2961     @param exclude_daemons: list of daemons that will be restarted on the master
2962       after all others are shut down
2963 @type debug: boolean
2964 @param debug: show debug output
2965     @type verbose: boolean
2966 @param verbose: show verbose output
2967
2968 """
2969 self.feedback_fn = feedback_fn
2970 self.cluster_name = cluster_name
2971 self.master_node = master_node
2972 self.online_nodes = online_nodes
2973 self.ssh_ports = dict(zip(online_nodes, ssh_ports))
2974
2975 self.ssh = ssh.SshRunner(self.cluster_name)
2976
2977 self.nonmaster_nodes = [name for name in online_nodes
2978 if name != master_node]
2979
2980 self.exclude_daemons = exclude_daemons
2981 self.debug = debug
2982 self.verbose = verbose
2983
2984 assert self.master_node not in self.nonmaster_nodes
2985
2986 def _RunCmd(self, node_name, cmd):
2987 """Runs a command on the local or a remote machine.
2988
2989 @type node_name: string
2990 @param node_name: Machine name
2991 @type cmd: list
2992 @param cmd: Command
2993
2994 """
2995 if node_name is None or node_name == self.master_node:
2996 # No need to use SSH
2997 result = utils.RunCmd(cmd)
2998 else:
2999 result = self.ssh.Run(node_name, constants.SSH_LOGIN_USER,
3000 utils.ShellQuoteArgs(cmd),
3001 port=self.ssh_ports[node_name])
3002
3003 if result.failed:
3004 errmsg = ["Failed to run command %s" % result.cmd]
3005 if node_name:
3006 errmsg.append("on node %s" % node_name)
3007 errmsg.append(": exitcode %s and error %s" %
3008 (result.exit_code, result.output))
3009 raise errors.OpExecError(" ".join(errmsg))
3010
3011 def Call(self, fn, *args):
3012 """Call function while all daemons are stopped.
3013
3014 @type fn: callable
3015 @param fn: Function to be called
3016
3017 """
3018 # Pause watcher by acquiring an exclusive lock on watcher state file
3019 self.feedback_fn("Blocking watcher")
3020 watcher_block = utils.FileLock.Open(pathutils.WATCHER_LOCK_FILE)
3021 try:
3022 # TODO: Currently, this just blocks. There's no timeout.
3023 # TODO: Should it be a shared lock?
3024 watcher_block.Exclusive(blocking=True)
3025
3026 # Stop master daemons, so that no new jobs can come in and all running
3027 # ones are finished
3028 self.feedback_fn("Stopping master daemons")
3029 self._RunCmd(None, [pathutils.DAEMON_UTIL, "stop-master"])
3030 try:
3031 # Stop daemons on all nodes
3032 for node_name in self.online_nodes:
3033 self.feedback_fn("Stopping daemons on %s" % node_name)
3034 self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop-all"])
3035         # Starting any daemons listed as exceptions
3036 if node_name == self.master_node:
3037 for daemon in self.exclude_daemons:
3038 self.feedback_fn("Starting daemon '%s' on %s" % (daemon,
3039 node_name))
3040 self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start", daemon])
3041
3042 # All daemons are shut down now
3043 try:
3044 return fn(self, *args)
3045 except Exception, err:
3046 _, errmsg = FormatError(err)
3047 logging.exception("Caught exception")
3048 self.feedback_fn(errmsg)
3049 raise
3050 finally:
3051 # Start cluster again, master node last
3052 for node_name in self.nonmaster_nodes + [self.master_node]:
3053           # Stopping any daemons listed as exceptions.
3054 # This might look unnecessary, but it makes sure that daemon-util
3055 # starts all daemons in the right order.
3056 if node_name == self.master_node:
3057 self.exclude_daemons.reverse()
3058 for daemon in self.exclude_daemons:
3059 self.feedback_fn("Stopping daemon '%s' on %s" % (daemon,
3060 node_name))
3061 self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop", daemon])
3062 self.feedback_fn("Starting daemons on %s" % node_name)
3063 self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start-all"])
3064
3065 finally:
3066 # Resume watcher
3067 watcher_block.Close()
3068
3069
3070 def RunWhileDaemonsStopped(feedback_fn, exclude_daemons, fn, *args, **kwargs):
3071 """Calls a function while all cluster daemons are stopped.
3072
3073 @type feedback_fn: callable
3074 @param feedback_fn: Feedback function
3075 @type exclude_daemons: list of string
3076   @param exclude_daemons: list of daemons that are stopped, but immediately
3077 restarted on the master to be available when calling
3078 'fn'. If None, all daemons will be stopped and none
3079 will be started before calling 'fn'.
3080 @type fn: callable
3081 @param fn: Function to be called when daemons are stopped
3082
3083 """
3084 feedback_fn("Gathering cluster information")
3085
3086 # This ensures we're running on the master daemon
3087 cl = GetClient()
3088
3089 (cluster_name, master_node) = \
3090 cl.QueryConfigValues(["cluster_name", "master_node"])
3091
3092 online_nodes = GetOnlineNodes([], cl=cl)
3093 ssh_ports = GetNodesSshPorts(online_nodes, cl)
3094
3095 # Don't keep a reference to the client. The master daemon will go away.
3096 del cl
3097
3098 assert master_node in online_nodes
3099 if exclude_daemons is None:
3100 exclude_daemons = []
3101
3102 debug = kwargs.get("debug", False)
3103 verbose = kwargs.get("verbose", False)
3104
3105 return _RunWhileDaemonsStoppedHelper(
3106 feedback_fn, cluster_name, master_node, online_nodes, ssh_ports,
3107 exclude_daemons, debug, verbose).Call(fn, *args)
3108
3109
3110 def RunWhileClusterStopped(feedback_fn, fn, *args):
3111 """Calls a function while all cluster daemons are stopped.
3112
3113 @type feedback_fn: callable
3114 @param feedback_fn: Feedback function
3115 @type fn: callable
3116 @param fn: Function to be called when daemons are stopped
3117
3118 """
3119 RunWhileDaemonsStopped(feedback_fn, None, fn, *args)
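
# Illustrative sketch (not part of the module): the callable receives the
# helper object as its first argument and runs while every daemon is down
# (_DoOfflineWork is hypothetical):
#
#   def _DoOfflineWork(runner):
#     runner.feedback_fn("All daemons are stopped")
#
#   RunWhileClusterStopped(ToStdout, _DoOfflineWork)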
3120
3121
3122 def GenerateTable(headers, fields, separator, data,
3123 numfields=None, unitfields=None,
3124 units=None):
3125 """Prints a table with headers and different fields.
3126
3127 @type headers: dict
3128 @param headers: dictionary mapping field names to headers for
3129 the table
3130 @type fields: list
3131 @param fields: the field names corresponding to each row in
3132 the data field
3133 @param separator: the separator to be used; if this is None,
3134 the default 'smart' algorithm is used which computes optimal
3135 field width, otherwise just the separator is used between
3136 each field
3137 @type data: list
3138 @param data: a list of lists, each sublist being one row to be output
3139 @type numfields: list
3140 @param numfields: a list with the fields that hold numeric
3141 values and thus should be right-aligned
3142 @type unitfields: list
3143 @param unitfields: a list with the fields that hold numeric
3144 values that should be formatted with the units field
3145 @type units: string or None
3146 @param units: the units we should use for formatting, or None for
3147 automatic choice (human-readable for non-separator usage, otherwise
3148 megabytes); this is a one-letter string
3149
3150 """
3151 if units is None:
3152 if separator:
3153 units = "m"
3154 else:
3155 units = "h"
3156
3157 if numfields is None:
3158 numfields = []
3159 if unitfields is None:
3160 unitfields = []
3161
3162 numfields = utils.FieldSet(*numfields) # pylint: disable=W0142
3163 unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142
3164
3165 format_fields = []
3166 for field in fields:
3167 if headers and field not in headers:
3168 # TODO: handle better unknown fields (either revert to old
3169 # style of raising exception, or deal more intelligently with
3170 # variable fields)
3171 headers[field] = field
3172 if separator is not None:
3173 format_fields.append("%s")
3174 elif numfields.Matches(field):
3175 format_fields.append("%*s")
3176 else:
3177 format_fields.append("%-*s")
3178
3179 if separator is None:
3180 mlens = [0 for name in fields]
3181 format_str = " ".join(format_fields)
3182 else:
3183 format_str = separator.replace("%", "%%").join(format_fields)
3184
3185 for row in data:
3186 if row is None:
3187 continue
3188 for idx, val in enumerate(row):
3189 if unitfields.Matches(fields[idx]):
3190 try:
3191 val = int(val)
3192 except (TypeError, ValueError):
3193 pass
3194 else:
3195 val = row[idx] = utils.FormatUnit(val, units)
3196 val = row[idx] = str(val)
3197 if separator is None:
3198 mlens[idx] = max(mlens[idx], len(val))
3199
3200 result = []
3201 if headers:
3202 args = []
3203 for idx, name in enumerate(fields):
3204 hdr = headers[name]
3205 if separator is None:
3206 mlens[idx] = max(mlens[idx], len(hdr))
3207 args.append(mlens[idx])
3208 args.append(hdr)
3209 result.append(format_str % tuple(args))
3210
3211 if separator is None:
3212 assert len(mlens) == len(fields)
3213
3214 if fields and not numfields.Matches(fields[-1]):
3215 mlens[-1] = 0
3216
3217 for line in data:
3218 args = []
3219 if line is None:
3220 line = ["-" for _ in fields]