ab42cf0123799fded067f2450f858692ef809a8c
[ganeti-github.git] / lib / cli.py
1 #
2 #
3
4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc.
5 # All rights reserved.
6 #
7 # Redistribution and use in source and binary forms, with or without
8 # modification, are permitted provided that the following conditions are
9 # met:
10 #
11 # 1. Redistributions of source code must retain the above copyright notice,
12 # this list of conditions and the following disclaimer.
13 #
14 # 2. Redistributions in binary form must reproduce the above copyright
15 # notice, this list of conditions and the following disclaimer in the
16 # documentation and/or other materials provided with the distribution.
17 #
18 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
19 # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
20 # TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
21 # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
22 # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
23 # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
24 # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
25 # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
26 # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
27 # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28 # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
30
31 """Module dealing with command line parsing"""
32
33
34 import sys
35 import textwrap
36 import os.path
37 import time
38 import logging
39 import errno
40 import itertools
41 import shlex
42 from cStringIO import StringIO
43
44 from ganeti import utils
45 from ganeti import errors
46 from ganeti import constants
47 from ganeti import opcodes
48 import ganeti.rpc.errors as rpcerr
49 import ganeti.rpc.node as rpc
50 from ganeti import ssh
51 from ganeti import compat
52 from ganeti import netutils
53 from ganeti import qlang
54 from ganeti import objects
55 from ganeti import pathutils
56 from ganeti import serializer
57
58 from ganeti.runtime import (GetClient)
59
60 from optparse import (OptionParser, TitledHelpFormatter,
61 Option, OptionValueError)
62
63
#: Explicit public API of this module; ``from ganeti.cli import *`` picks up
#: exactly these names (options, helpers, argument classes, completion marks)
__all__ = [
  # Command line options
  "ABSOLUTE_OPT",
  "ADD_UIDS_OPT",
  "ADD_RESERVED_IPS_OPT",
  "ALLOCATABLE_OPT",
  "ALLOC_POLICY_OPT",
  "ALL_OPT",
  "ALLOW_FAILOVER_OPT",
  "AUTO_PROMOTE_OPT",
  "AUTO_REPLACE_OPT",
  "BACKEND_OPT",
  "BLK_OS_OPT",
  "CAPAB_MASTER_OPT",
  "CAPAB_VM_OPT",
  "CLEANUP_OPT",
  "CLUSTER_DOMAIN_SECRET_OPT",
  "CONFIRM_OPT",
  "CP_SIZE_OPT",
  "COMPRESSION_TOOLS_OPT",
  "DEBUG_OPT",
  "DEBUG_SIMERR_OPT",
  "DISKIDX_OPT",
  "DISK_OPT",
  "DISK_PARAMS_OPT",
  "DISK_TEMPLATE_OPT",
  "DRAINED_OPT",
  "DRY_RUN_OPT",
  "DRBD_HELPER_OPT",
  "DST_NODE_OPT",
  "EARLY_RELEASE_OPT",
  "ENABLED_HV_OPT",
  "ENABLED_DISK_TEMPLATES_OPT",
  "ENABLED_USER_SHUTDOWN_OPT",
  "ERROR_CODES_OPT",
  "EXT_PARAMS_OPT",
  "FAILURE_ONLY_OPT",
  "FIELDS_OPT",
  "FILESTORE_DIR_OPT",
  "FILESTORE_DRIVER_OPT",
  "FORCE_FAILOVER_OPT",
  "FORCE_FILTER_OPT",
  "FORCE_OPT",
  "FORCE_VARIANT_OPT",
  "GATEWAY_OPT",
  "GATEWAY6_OPT",
  "GLOBAL_FILEDIR_OPT",
  "HID_OS_OPT",
  "GLOBAL_GLUSTER_FILEDIR_OPT",
  "GLOBAL_SHARED_FILEDIR_OPT",
  "HOTPLUG_OPT",
  "HOTPLUG_IF_POSSIBLE_OPT",
  "HVLIST_OPT",
  "HVOPTS_OPT",
  "HYPERVISOR_OPT",
  "IALLOCATOR_OPT",
  "DEFAULT_IALLOCATOR_OPT",
  "DEFAULT_IALLOCATOR_PARAMS_OPT",
  "ENABLED_DATA_COLLECTORS_OPT",
  "IDENTIFY_DEFAULTS_OPT",
  "IGNORE_CONSIST_OPT",
  "IGNORE_ERRORS_OPT",
  "IGNORE_FAILURES_OPT",
  "IGNORE_OFFLINE_OPT",
  "IGNORE_REMOVE_FAILURES_OPT",
  "IGNORE_SECONDARIES_OPT",
  "IGNORE_SIZE_OPT",
  "INCLUDEDEFAULTS_OPT",
  "INTERVAL_OPT",
  "INSTALL_IMAGE_OPT",
  "INSTANCE_COMMUNICATION_OPT",
  "INSTANCE_COMMUNICATION_NETWORK_OPT",
  "MAC_PREFIX_OPT",
  "MAINTAIN_NODE_HEALTH_OPT",
  "MASTER_NETDEV_OPT",
  "MASTER_NETMASK_OPT",
  "MAX_TRACK_OPT",
  "MC_OPT",
  "MIGRATION_MODE_OPT",
  "MODIFY_ETCHOSTS_OPT",
  "NET_OPT",
  "NETWORK_OPT",
  "NETWORK6_OPT",
  "NEW_CLUSTER_CERT_OPT",
  "NEW_NODE_CERT_OPT",
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
  "NEW_CONFD_HMAC_KEY_OPT",
  "NEW_RAPI_CERT_OPT",
  "NEW_PRIMARY_OPT",
  "NEW_SECONDARY_OPT",
  "NEW_SPICE_CERT_OPT",
  "NIC_PARAMS_OPT",
  "NOCONFLICTSCHECK_OPT",
  "NODE_FORCE_JOIN_OPT",
  "NODE_LIST_OPT",
  "NODE_PLACEMENT_OPT",
  "NODEGROUP_OPT",
  "NODE_PARAMS_OPT",
  "NODE_POWERED_OPT",
  "NOHDR_OPT",
  "NOIPCHECK_OPT",
  "NO_INSTALL_OPT",
  "NONAMECHECK_OPT",
  "NOMODIFY_ETCHOSTS_OPT",
  "NOMODIFY_SSH_SETUP_OPT",
  "NONICS_OPT",
  "NONLIVE_OPT",
  "NONPLUS1_OPT",
  "NORUNTIME_CHGS_OPT",
  "NOSHUTDOWN_OPT",
  "NOSTART_OPT",
  "NOSSH_KEYCHECK_OPT",
  "NOVOTING_OPT",
  "NO_REMEMBER_OPT",
  "NWSYNC_OPT",
  "OFFLINE_INST_OPT",
  "ONLINE_INST_OPT",
  "ON_PRIMARY_OPT",
  "ON_SECONDARY_OPT",
  "OFFLINE_OPT",
  "OS_OPT",
  "OSPARAMS_OPT",
  "OSPARAMS_PRIVATE_OPT",
  "OSPARAMS_SECRET_OPT",
  "OS_SIZE_OPT",
  "OOB_TIMEOUT_OPT",
  "POWER_DELAY_OPT",
  "PREALLOC_WIPE_DISKS_OPT",
  "PRIMARY_IP_VERSION_OPT",
  "PRIMARY_ONLY_OPT",
  "PRINT_JOBID_OPT",
  "PRIORITY_OPT",
  "RAPI_CERT_OPT",
  "READD_OPT",
  "REASON_OPT",
  "REBOOT_TYPE_OPT",
  "REMOVE_INSTANCE_OPT",
  "REMOVE_RESERVED_IPS_OPT",
  "REMOVE_UIDS_OPT",
  "RESERVED_LVS_OPT",
  "RQL_OPT",
  "RUNTIME_MEM_OPT",
  "ROMAN_OPT",
  "SECONDARY_IP_OPT",
  "SECONDARY_ONLY_OPT",
  "SELECT_OS_OPT",
  "SEP_OPT",
  "SHOWCMD_OPT",
  "SHOW_MACHINE_OPT",
  "COMPRESS_OPT",
  "TRANSPORT_COMPRESSION_OPT",
  "SHUTDOWN_TIMEOUT_OPT",
  "SINGLE_NODE_OPT",
  "SPECS_CPU_COUNT_OPT",
  "SPECS_DISK_COUNT_OPT",
  "SPECS_DISK_SIZE_OPT",
  "SPECS_MEM_SIZE_OPT",
  "SPECS_NIC_COUNT_OPT",
  "SPLIT_ISPECS_OPTS",
  "IPOLICY_STD_SPECS_OPT",
  "IPOLICY_DISK_TEMPLATES",
  "IPOLICY_VCPU_RATIO",
  "SEQUENTIAL_OPT",
  "SPICE_CACERT_OPT",
  "SPICE_CERT_OPT",
  "SRC_DIR_OPT",
  "SRC_NODE_OPT",
  "SUBMIT_OPT",
  "SUBMIT_OPTS",
  "STARTUP_PAUSED_OPT",
  "STATIC_OPT",
  "SYNC_OPT",
  "TAG_ADD_OPT",
  "TAG_SRC_OPT",
  "TIMEOUT_OPT",
  "TO_GROUP_OPT",
  "UIDPOOL_OPT",
  "USEUNITS_OPT",
  "USE_EXTERNAL_MIP_SCRIPT",
  "USE_REPL_NET_OPT",
  "VERBOSE_OPT",
  "VG_NAME_OPT",
  "WFSYNC_OPT",
  "YES_DOIT_OPT",
  "ZEROING_IMAGE_OPT",
  "ZERO_FREE_SPACE_OPT",
  "HELPER_STARTUP_TIMEOUT_OPT",
  "HELPER_SHUTDOWN_TIMEOUT_OPT",
  "ZEROING_TIMEOUT_FIXED_OPT",
  "ZEROING_TIMEOUT_PER_MIB_OPT",
  "DISK_STATE_OPT",
  "HV_STATE_OPT",
  "IGNORE_IPOLICY_OPT",
  "INSTANCE_POLICY_OPTS",
  # Generic functions for CLI programs
  "ConfirmOperation",
  "CreateIPolicyFromOpts",
  "GenericMain",
  "GenericInstanceCreate",
  "GenericList",
  "GenericListFields",
  "GetClient",
  "GetOnlineNodes",
  "GetNodesSshPorts",
  "JobExecutor",
  "JobSubmittedException",
  "ParseTimespec",
  "RunWhileClusterStopped",
  "SubmitOpCode",
  "SubmitOpCodeToDrainedQueue",
  "SubmitOrSend",
  "UsesRPC",
  # Formatting functions
  "ToStderr", "ToStdout",
  "FormatError",
  "FormatQueryResult",
  "FormatParamsDictInfo",
  "FormatPolicyInfo",
  "PrintIPolicyCommand",
  "PrintGenericInfo",
  "GenerateTable",
  "AskUser",
  "FormatTimestamp",
  "FormatLogMessage",
  # Tags functions
  "ListTags",
  "AddTags",
  "RemoveTags",
  # command line options support infrastructure
  "ARGS_MANY_INSTANCES",
  "ARGS_MANY_NODES",
  "ARGS_MANY_GROUPS",
  "ARGS_MANY_NETWORKS",
  "ARGS_NONE",
  "ARGS_ONE_INSTANCE",
  "ARGS_ONE_NODE",
  "ARGS_ONE_GROUP",
  "ARGS_ONE_OS",
  "ARGS_ONE_NETWORK",
  "ArgChoice",
  "ArgCommand",
  "ArgFile",
  "ArgGroup",
  "ArgHost",
  "ArgInstance",
  "ArgJobId",
  "ArgNetwork",
  "ArgNode",
  "ArgOs",
  "ArgExtStorage",
  "ArgSuggest",
  "ArgUnknown",
  "OPT_COMPL_INST_ADD_NODES",
  "OPT_COMPL_MANY_NODES",
  "OPT_COMPL_ONE_IALLOCATOR",
  "OPT_COMPL_ONE_INSTANCE",
  "OPT_COMPL_ONE_NODE",
  "OPT_COMPL_ONE_NODEGROUP",
  "OPT_COMPL_ONE_NETWORK",
  "OPT_COMPL_ONE_OS",
  "OPT_COMPL_ONE_EXTSTORAGE",
  "cli_option",
  "FixHvParams",
  "SplitNodeOption",
  "CalculateOSNames",
  "ParseFields",
  "COMMON_CREATE_OPTS",
  ]
332
#: Key prefix in key=value option strings that sets the value to False
#: (see L{_SplitKeyVal})
NO_PREFIX = "no_"
#: Key prefix in key=value option strings that sets the value to None
#: (see L{_SplitKeyVal})
UN_PREFIX = "-"

#: Priorities (sorted)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)

# Query result status for clients
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)

#: Maximum batch size for ChooseJob
_CHOOSE_BATCH = 25
355
356
# constants used to create InstancePolicy dictionary
TISPECS_GROUP_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  }

# Same as TISPECS_GROUP_TYPES, but cluster-level specs also carry a "std" value
TISPECS_CLUSTER_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  constants.ISPECS_STD: constants.VTYPE_INT,
  }

#: User-friendly names for query2 field types
_QFT_NAMES = {
  constants.QFT_UNKNOWN: "Unknown",
  constants.QFT_TEXT: "Text",
  constants.QFT_BOOL: "Boolean",
  constants.QFT_NUMBER: "Number",
  constants.QFT_NUMBER_FLOAT: "Floating-point number",
  constants.QFT_UNIT: "Storage size",
  constants.QFT_TIMESTAMP: "Timestamp",
  constants.QFT_OTHER: "Custom",
  }
380
381
class _Argument(object):
  """Base description of a positional command line argument.

  @ivar min: minimum number of times the argument must occur
  @ivar max: maximum number of occurrences (None meaning no limit)

  """
  def __init__(self, min=0, max=None): # pylint: disable=W0622
    self.min = min
    self.max = max

  def __repr__(self):
    cls_name = self.__class__.__name__
    return "<%s min=%s max=%s>" % (cls_name, self.min, self.max)
390
391
class ArgSuggest(_Argument):
  """Argument with completion suggestions.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    fmt = "<%s min=%s max=%s choices=%r>"
    return fmt % (self.__class__.__name__, self.min, self.max, self.choices)
406
407
class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices. No behavior is added here; the
  subclass only marks the stricter contract.

  """
415
416
class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  Pure marker subclass: all behavior is inherited from L{_Argument}.

  """
421
422
class ArgInstance(_Argument):
  """Instances argument.

  Pure marker subclass: all behavior is inherited from L{_Argument}.

  """
427
428
class ArgNode(_Argument):
  """Node argument.

  Pure marker subclass: all behavior is inherited from L{_Argument}.

  """
433
434
class ArgNetwork(_Argument):
  """Network argument.

  Pure marker subclass: all behavior is inherited from L{_Argument}.

  """
439
440
class ArgGroup(_Argument):
  """Node group argument.

  Pure marker subclass: all behavior is inherited from L{_Argument}.

  """
445
446
class ArgJobId(_Argument):
  """Job ID argument.

  Pure marker subclass: all behavior is inherited from L{_Argument}.

  """
451
452
class ArgFile(_Argument):
  """File path argument.

  Pure marker subclass: all behavior is inherited from L{_Argument}.

  """
457
458
class ArgCommand(_Argument):
  """Command argument.

  Pure marker subclass: all behavior is inherited from L{_Argument}.

  """
463
464
class ArgHost(_Argument):
  """Host argument.

  Pure marker subclass: all behavior is inherited from L{_Argument}.

  """
469
470
class ArgOs(_Argument):
  """OS argument.

  Pure marker subclass: all behavior is inherited from L{_Argument}.

  """
475
476
class ArgExtStorage(_Argument):
  """ExtStorage argument.

  Pure marker subclass: all behavior is inherited from L{_Argument}.

  """
481
482
# Ready-made argument specifications for the most common command signatures
ARGS_NONE = []
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NETWORKS = [ArgNetwork()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NETWORK = [ArgNetwork(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
493
494
def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Determines the (kind, name) tag target from C{opts.tag_type} and the
  positional arguments. Note that this function will modify its args
  parameter: for non-cluster targets the object name is popped off it.

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    # the cluster is a singleton, no name needed
    return (kind, "")
  if kind in (constants.TAG_NODEGROUP, constants.TAG_NODE,
              constants.TAG_NETWORK, constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command",
                                 errors.ECODE_INVAL)
    return (kind, args.pop(0))
  raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
518
519
def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    src = sys.stdin
  else:
    src = open(fname, "r")
  extra = []
  try:
    # read via readline() instead of iterating over the file object,
    # to avoid python bug 1633941
    for line in iter(src.readline, ""):
      extra.append(line.strip())
  finally:
    src.close()
  args.extend(extra)
547
548
def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  (kind, name) = _ExtractTagsObject(opts, args)
  cl = GetClient()
  for tag in sorted(cl.QueryTags(kind, name)):
    ToStdout(tag)
565
566
def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  (kind, name) = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added", errors.ECODE_INVAL)
  SubmitOrSend(opcodes.OpTagsSet(kind=kind, name=name, tags=args), opts)
582
583
def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  (kind, name) = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed", errors.ECODE_INVAL)
  SubmitOrSend(opcodes.OpTagsDel(kind=kind, name=name, tags=args), opts)
599
600
def check_unit(option, opt, value): # pylint: disable=W0613
  """OptParsers custom converter for units.

  @type opt: string
  @param opt: option name, used only when building the error message
  @type value: string
  @param value: the value to convert (e.g. a size with an optional suffix)
  @return: the parsed value, as returned by L{utils.ParseUnit}
  @raise OptionValueError: if the value cannot be parsed as a unit

  """
  try:
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    raise OptionValueError("option %s: %s" % (opt, err))
609
610
def _SplitKeyVal(opt, data, parse_prefixes):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, keys with the prefix
  "-" will have value=None and the prefix stripped, and the others will
  have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @type parse_prefixes: bool
  @param parse_prefixes: whether to handle prefixes specially
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  parsed = {}
  if not data:
    return parsed
  for elem in utils.UnescapeAndSplit(data, sep=","):
    if "=" in elem:
      (key, val) = elem.split("=", 1)
    elif not parse_prefixes:
      # a bare key is only legal when prefix handling is enabled
      raise errors.ParameterError("Missing value for key '%s' in option %s" %
                                  (elem, opt))
    elif elem.startswith(NO_PREFIX):
      (key, val) = (elem[len(NO_PREFIX):], False)
    elif elem.startswith(UN_PREFIX):
      (key, val) = (elem[len(UN_PREFIX):], None)
    else:
      (key, val) = (elem, True)
    if key in parsed:
      raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                  (key, opt))
    parsed[key] = val
  return parsed
652
653
def _SplitIdentKeyVal(opt, value, parse_prefixes):
  """Helper function to parse "ident:key=val,key=val" options.

  @type opt: string
  @param opt: option name, used in error messages
  @type value: string
  @param value: expected to be in the format "ident:key=val,key=val,..."
  @type parse_prefixes: bool
  @param parse_prefixes: whether to handle prefixes specially (see
      L{_SplitKeyVal})
  @rtype: tuple
  @return: (ident, value); the value is False for a "no_" prefix, None for
      a "-" prefix, and otherwise the key/value dict from the rest
  @raises errors.ParameterError: in case of duplicates or other parsing errors

  """
  if ":" in value:
    (ident, rest) = value.split(":", 1)
  else:
    (ident, rest) = (value, "")

  if parse_prefixes and ident.startswith(NO_PREFIX):
    # "no_ident" removes the whole group, so options make no sense
    if rest:
      raise errors.ParameterError(
        "Cannot pass options when removing parameter groups: %s" % value)
    return (ident[len(NO_PREFIX):], False)

  # a leading "-" also means removal, unless the ident looks like a
  # negative index (e.g. "-1")
  if (parse_prefixes and ident.startswith(UN_PREFIX) and
      (len(ident) <= len(UN_PREFIX) or not ident[len(UN_PREFIX)].isdigit())):
    if rest:
      raise errors.ParameterError(
        "Cannot pass options when removing parameter groups: %s" % value)
    return (ident[len(UN_PREFIX):], None)

  return (ident, _SplitKeyVal(opt, rest, parse_prefixes))
689
690
def check_ident_key_val(option, opt, value): # pylint: disable=W0613
  """Custom parser for ident:key=val,key=val options.

  The parsed value is stored as a (ident, {key: val}) tuple, so multiple
  uses of this option via action=append are possible.

  """
  return _SplitIdentKeyVal(opt, value, parse_prefixes=True)
699
700
def check_key_val(option, opt, value): # pylint: disable=W0613
  """Custom parser class for key=val,key=val options.

  The parsed values are stored as a plain {key: val} dictionary.

  """
  return _SplitKeyVal(opt, value, parse_prefixes=True)
708
709
def check_key_private_val(option, opt, value): # pylint: disable=W0613
  """Custom parser class for private and secret key=val,key=val options.

  Like L{check_key_val}, but the resulting dict is wrapped in a
  L{serializer.PrivateDict}.

  """
  kv_dict = _SplitKeyVal(opt, value, True)
  return serializer.PrivateDict(kv_dict)
717
718
def _SplitListKeyVal(opt, value):
  """Parse a "/"-separated list of "ident:key=val" groups into a dict.

  @raises errors.ParameterError: on empty sections or duplicated idents

  """
  parsed = {}
  for elem in value.split("/"):
    if not elem:
      raise errors.ParameterError("Empty section in option '%s'" % opt)
    (ident, valdict) = _SplitIdentKeyVal(opt, elem, False)
    if ident in parsed:
      raise errors.ParameterError("Duplicated parameter '%s' in parsing %s: %s"
                                  % (ident, opt, elem))
    parsed[ident] = valdict
  return parsed
731
732
def check_multilist_ident_key_val(_, opt, value):
  """Custom parser for "ident:key=val,key=val/ident:key=val//ident:.." options.

  @rtype: list of dictionary
  @return: [{ident: {key: val, key: val}, ident: {key: val}}, {ident:..}]

  """
  return [_SplitListKeyVal(opt, line) for line in value.split("//")]
744
745
def check_bool(option, opt, value): # pylint: disable=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value in (constants.VALUE_FALSE, "no"):
    return False
  if value in (constants.VALUE_TRUE, "yes"):
    return True
  raise errors.ParameterError("Invalid boolean value '%s'" % value)
759
760
def check_list(option, opt, value): # pylint: disable=W0613
  """Custom parser for comma-separated lists.

  """
  # note: "".split(",") is [""], not an empty list, hence the explicit
  # emptiness check
  if value:
    return utils.UnescapeAndSplit(value)
  return []
771
772
def check_maybefloat(option, opt, value): # pylint: disable=W0613
  """Custom parser for float numbers which might be also defaults.

  """
  lowered = value.lower()
  if lowered == constants.VALUE_DEFAULT:
    # the "default" keyword is passed through as-is
    return lowered
  return float(lowered)
783
784
# completion_suggestion is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_EXTSTORAGE,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_ONE_NETWORK,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 109)

#: Set of all dynamic-completion markers, for membership tests
OPT_COMPL_ALL = compat.UniqueFrozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_EXTSTORAGE,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_ONE_NETWORK,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])
808
809
class CliOption(Option):
  """Custom option class for optparse.

  Extends the stock optparse option with a "completion_suggest" attribute
  and the Ganeti-specific value types handled by the check_* parsers in
  this module.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "multilistidentkeyval",
    "identkeyval",
    "keyval",
    "keyprivateval",
    "unit",
    "bool",
    "list",
    "maybefloat",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER.update({
    "multilistidentkeyval": check_multilist_ident_key_val,
    "identkeyval": check_ident_key_val,
    "keyval": check_key_val,
    "keyprivateval": check_key_private_val,
    "unit": check_unit,
    "bool": check_bool,
    "list": check_list,
    "maybefloat": check_maybefloat,
    })
836
837
# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption

#: Shorthand used in help texts of yes/no options
_YORNO = "yes|no"
843
# Generic output/behaviour options shared by most CLI commands
DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=("h", "m", "g", "t"),
                          help="Specify units for output (one of h/m/g/t)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")

IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
                                action="store_true", default=False,
                                help=("Ignore offline nodes and do as much"
                                      " as possible"))

TAG_ADD_OPT = cli_option("--tags", dest="tags",
                         default=None, help="Comma-separated list of instance"
                         " tags")

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

PRINT_JOBID_OPT = cli_option("--print-jobid", dest="print_jobid",
                             default=False, action="store_true",
                             help=("Additionally print the job as first line"
                                   " on stdout (for scripting)."))

SEQUENTIAL_OPT = cli_option("--sequential", dest="sequential",
                            default=False, action="store_true",
                            help=("Execute all resulting jobs sequentially"))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))

DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true",
                         help=("Do not execute the operation, just run the"
                               " check steps and verify if it could be"
                               " executed"))

VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
                         action="store_true",
                         help="Increase the verbosity of the operation")

DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

WFSYNC_OPT = cli_option("--wait-for-sync", dest="wait_for_sync",
                        default=False, action="store_true",
                        help="Wait for disks to sync")

ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
                             action="store_true", default=False,
                             help="Enable offline instance")

OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
                              action="store_true", default=False,
                              help="Disable down instance")

# Instance creation / OS related options
DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help=("Custom disk setup (%s)" %
                                     utils.CommaJoin(constants.DISK_TEMPLATES)),
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))

EXT_PARAMS_OPT = cli_option("-e", "--ext-params", dest="ext_params",
                            default={}, type="keyval",
                            help="Parameters for ExtStorage template"
                            " conversions in the format:"
                            " provider=prvdr[,param1=val1,param2=val2,...]")

NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
                        help="Do not create any network cards for"
                        " the instance")

FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default=None, metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

# NOTE: reuses "-I" (used by a different command than IALLOCATOR_OPT)
DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
                                    metavar="<NAME>",
                                    help="Set the default instance"
                                    " allocator plugin",
                                    default=None, type="string",
                                    completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

DEFAULT_IALLOCATOR_PARAMS_OPT = cli_option("--default-iallocator-params",
                                           dest="default_iallocator_params",
                                           help="iallocator template"
                                           " parameters, in the format"
                                           " template:option=value,"
                                           " option=value,...",
                                           type="keyval",
                                           default={})

OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
                    metavar="<os>",
                    completion_suggest=OPT_COMPL_ONE_OS)

OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                          type="keyval", default={},
                          help="OS parameters")

OSPARAMS_PRIVATE_OPT = cli_option("--os-parameters-private",
                                  dest="osparams_private",
                                  type="keyprivateval",
                                  default=serializer.PrivateDict(),
                                  help="Private OS parameters"
                                  " (won't be logged)")

OSPARAMS_SECRET_OPT = cli_option("--os-parameters-secret",
                                 dest="osparams_secret",
                                 type="keyprivateval",
                                 default=serializer.PrivateDict(),
                                 help="Secret OS parameters (won't be logged or"
                                 " saved; you must supply these for every"
                                 " operation.)")
1002
# NOTE(review): several options below use mutable defaults ({} / []);
# optparse stores these objects by reference, so they would be shared
# between parser runs in the same process — presumably each CLI invocation
# builds one parser, but verify before reusing parsers.
FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")

NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
                            action="store_true", default=False,
                            help="Do not install the OS (will"
                            " enable no-start)")

NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes",
                                dest="allow_runtime_chgs",
                                default=True, action="store_false",
                                help="Don't allow runtime changes")

BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
                        default={}, dest="hvparams",
                        help="Hypervisor parameters")

DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
                             help="Disk template parameters, in the format"
                             " template:option=value,option=value,...",
                             type="identkeyval", action="append", default=[])

# Instance policy specs (min/max/std bounds per resource)
SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
                                type="keyval", default={},
                                help="Memory size specs: list of key=value,"
                                " where key is one of min, max, std"
                                " (in MB or using a unit)")

SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
                                 type="keyval", default={},
                                 help="CPU count specs: list of key=value,"
                                 " where key is one of min, max, std")

SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
                                  dest="ispecs_disk_count",
                                  type="keyval", default={},
                                  help="Disk count specs: list of key=value,"
                                  " where key is one of min, max, std")

SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
                                 type="keyval", default={},
                                 help="Disk size specs: list of key=value,"
                                 " where key is one of min, max, std"
                                 " (in MB or using a unit)")

SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
                                 type="keyval", default={},
                                 help="NIC count specs: list of key=value,"
                                 " where key is one of min, max, std")

IPOLICY_BOUNDS_SPECS_STR = "--ipolicy-bounds-specs"
IPOLICY_BOUNDS_SPECS_OPT = cli_option(IPOLICY_BOUNDS_SPECS_STR,
                                      dest="ipolicy_bounds_specs",
                                      type="multilistidentkeyval", default=None,
                                      help="Complete instance specs limits")

IPOLICY_STD_SPECS_STR = "--ipolicy-std-specs"
IPOLICY_STD_SPECS_OPT = cli_option(IPOLICY_STD_SPECS_STR,
                                   dest="ipolicy_std_specs",
                                   type="keyval", default=None,
                                   help="Complete standard instance specs")

IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
                                    dest="ipolicy_disk_templates",
                                    type="list", default=None,
                                    help="Comma-separated list of"
                                    " enabled disk templates")

IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
                                dest="ipolicy_vcpu_ratio",
                                type="maybefloat", default=None,
                                help="The maximum allowed vcpu-to-cpu ratio")

IPOLICY_SPINDLE_RATIO = cli_option("--ipolicy-spindle-ratio",
                                   dest="ipolicy_spindle_ratio",
                                   type="maybefloat", default=None,
                                   help=("The maximum allowed instances to"
                                         " spindle ratio"))

# NOTE: "-H" is shared by HYPERVISOR_OPT, HVLIST_OPT and HVOPTS_OPT;
# the variants are used by different commands
HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")

NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
                           action="store_false",
                           help="Don't check that the instance's IP"
                           " is alive")

NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
                             default=True, action="store_false",
                             help="Don't check that the instance's name"
                             " is resolvable")

NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disks"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")

IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
                                dest="ignore_consistency",
                                action="store_true", default=False,
                                help="Ignore the consistency of the disks on"
                                " the secondary")

ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
                                dest="allow_failover",
                                action="store_true", default=False,
                                help="If migration is not possible fallback to"
                                " failover")
1135
1136 FORCE_FAILOVER_OPT = cli_option("--force-failover",
1137 dest="force_failover",
1138 action="store_true", default=False,
1139 help="Do not use migration, always use"
1140 " failover")
1141
1142 NONLIVE_OPT = cli_option("--non-live", dest="live",
1143 default=True, action="store_false",
1144 help="Do a non-live migration (this usually means"
1145 " freeze the instance, save the state, transfer and"
1146 " only then resume running on the secondary node)")
1147
# Fix: the help text opened a parenthesis that was never closed.
MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                default=None,
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live)")
1153
# Node-selection options.  NOTE(review): "-n"/"--node" is registered by
# three different option objects below (single node, node list, placement);
# optparse forbids duplicates, so each parser may include only one of them.
NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
                                help="Target node and optional secondary node",
                                metavar="<pnode>[:<snode>]",
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)

NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
                           action="append", metavar="<node>",
                           help="Use only this node (can be used multiple"
                           " times, if not given defaults to all nodes)",
                           completion_suggest=OPT_COMPL_ONE_NODE)

NODEGROUP_OPT_NAME = "--node-group"
NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
                           dest="nodegroup",
                           help="Node group (name or uuid)",
                           metavar="<nodegroup>",
                           default=None, type="string",
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)

SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
                             metavar="<node>",
                             completion_suggest=OPT_COMPL_ONE_NODE)

NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                         action="store_false",
                         help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                         action="store_true", default=False,
                         help="Show command instead of executing it")
1184
# Fix: the help text contained a doubled space ("and  disrupt") and an
# unclosed parenthesis at the end.
CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration/failover,"
                         " try to recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and"
                         " disrupt briefly the replication (like during the"
                         " migration/failover)")
1193
# Misc per-command flags (instance info, removal, disk replacement, node
# management and cluster-wide settings).
STATIC_OPT = cli_option("-s", "--static", dest="static",
                        action="store_true", default=False,
                        help="Only show configuration data, not runtime data")

ALL_OPT = cli_option("--all", dest="show_all",
                     default=False, action="store_true",
                     help="Show info on all instances on the cluster."
                     " This can take a long time to run, use wisely")

SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
                           action="store_true", default=False,
                           help="Interactive OS reinstall, lists available"
                           " OS templates for selection")

IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster"
                                 " configuration even if there are failures"
                                 " during the removal process")

IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
                                        dest="ignore_remove_failures",
                                        action="store_true", default=False,
                                        help="Remove the instance from the"
                                        " cluster configuration even if there"
                                        " are failures during the removal"
                                        " process")

REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster")

DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
                          help="Specifies the new node for the instance",
                          metavar="NODE", default=None,
                          completion_suggest=OPT_COMPL_ONE_NODE)

NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               help="Specifies the new secondary node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

NEW_PRIMARY_OPT = cli_option("--new-primary", dest="new_primary_node",
                             help="Specifies the new primary node",
                             metavar="<node>", default=None,
                             completion_suggest=OPT_COMPL_ONE_NODE)

ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                            " node (applies only to internally mirrored"
                            " disk templates, e.g. %s)" %
                            utils.CommaJoin(constants.DTS_INT_MIRROR))

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                              " node (applies only to internally mirrored"
                              " disk templates, e.g. %s)" %
                              utils.CommaJoin(constants.DTS_INT_MIRROR))

AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
                              default=False, action="store_true",
                              help="Lock all nodes and auto-promote as needed"
                              " to MC status")

AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
                              default=False, action="store_true",
                              help="Automatically replace faulty disks"
                              " (applies only to internally mirrored"
                              " disk templates, e.g. %s)" %
                              utils.CommaJoin(constants.DTS_INT_MIRROR))

IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")

SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
                          metavar="<node>",
                          completion_suggest=OPT_COMPL_ONE_NODE)

SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
                         metavar="<dir>")

SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary ip for the node",
                              metavar="ADDRESS", default=None)

READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Readd old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")

NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
                                 default=False, action="store_true",
                                 help="Force the joining of a node")

# Tri-state node flags: type="bool" with default=None distinguishes
# "not given" from an explicit yes/no.
MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")

OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the offline flag on the node"
                               " (cluster does not communicate with offline"
                               " nodes)"))

DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the drained flag on the node"
                               " (excluded from allocation operations)"))

CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
                              type="bool", default=None, metavar=_YORNO,
                              help="Set the master_capable flag on the node")

CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
                          type="bool", default=None, metavar=_YORNO,
                          help="Set the vm_capable flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             type="bool", default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")

ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)

ENABLED_DISK_TEMPLATES_OPT = cli_option("--enabled-disk-templates",
                                        dest="enabled_disk_templates",
                                        help="Comma-separated list of "
                                        "disk templates",
                                        type="string", default=None)

ENABLED_USER_SHUTDOWN_OPT = cli_option("--user-shutdown",
                                       default=None,
                                       dest="enabled_user_shutdown",
                                       help="Whether user shutdown is enabled",
                                       type="bool")

NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
                            type="keyval", default={},
                            help="NIC parameters")

CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
                         dest="candidate_pool_size", type="int",
                         help="Set the candidate pool size")

RQL_OPT = cli_option("--max-running-jobs", dest="max_running_jobs",
                     type="int", help="Set the maximal number of jobs to "
                     "run simultaneously")

MAX_TRACK_OPT = cli_option("--max-tracked-jobs", dest="max_tracked_jobs",
                           type="int", help="Set the maximal number of jobs to "
                           "be tracked simultaneously for "
                           "scheduling")

COMPRESSION_TOOLS_OPT = \
  cli_option("--compression-tools",
             dest="compression_tools", type="string", default=None,
             help="Comma-separated list of compression tools which are"
             " allowed to be used by Ganeti in various operations")

VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
                         help=("Enables LVM and specifies the volume group"
                               " name (cluster-wide) for disk allocation"
                               " [%s]" % constants.DEFAULT_VG),
                         metavar="VG", default=None)

YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
                          help="Destroy cluster", action="store_true")

NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
                          help="Skip node agreement check (dangerous)",
                          action="store_true", default=False)

MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
                            help="Specify the mac prefix for the instance IP"
                            " addresses, in the format XX:XX:XX",
                            metavar="PREFIX",
                            default=None)

MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
                               help="Specify the node interface (cluster-wide)"
                               " on which the master IP address will be added"
                               " (cluster init default: %s)" %
                               constants.DEFAULT_BRIDGE,
                               metavar="NETDEV",
                               default=None)

MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
                                help="Specify the netmask of the master IP",
                                metavar="NETMASK",
                                default=None)

USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
                                     dest="use_external_mip_script",
                                     help="Specify whether to run a"
                                     " user-provided script for the master"
                                     " IP address turnup and"
                                     " turndown operations",
                                     type="bool", metavar=_YORNO, default=None)

GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                                help="Specify the default directory (cluster-"
                                "wide) for storing the file-based disks [%s]" %
                                pathutils.DEFAULT_FILE_STORAGE_DIR,
                                metavar="DIR",
                                default=None)

GLOBAL_SHARED_FILEDIR_OPT = cli_option(
  "--shared-file-storage-dir",
  dest="shared_file_storage_dir",
  help="Specify the default directory (cluster-wide) for storing the"
  " shared file-based disks [%s]" %
  pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR,
  metavar="SHAREDDIR", default=None)

GLOBAL_GLUSTER_FILEDIR_OPT = cli_option(
  "--gluster-storage-dir",
  dest="gluster_storage_dir",
  help="Specify the default directory (cluster-wide) for mounting Gluster"
  " file systems [%s]" %
  pathutils.DEFAULT_GLUSTER_STORAGE_DIR,
  metavar="GLUSTERDIR",
  default=pathutils.DEFAULT_GLUSTER_STORAGE_DIR)

NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify %s" % pathutils.ETC_HOSTS,
                                   action="store_false", default=True)

MODIFY_ETCHOSTS_OPT = \
  cli_option("--modify-etc-hosts", dest="modify_etc_hosts", metavar=_YORNO,
             default=None, type="bool",
             help="Defines whether the cluster should autonomously modify"
             " and keep in sync the /etc/hosts file of the nodes")

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)

REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
                             help="Type of reboot: soft/hard/full",
                             default=constants.INSTANCE_REBOOT_HARD,
                             metavar="<REBOOT>",
                             choices=list(constants.REBOOT_TYPES))

IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

COMPRESS_OPT = cli_option("--compress", dest="compress",
                          type="string", default=constants.IEC_NONE,
                          help="The compression mode to use")

TRANSPORT_COMPRESSION_OPT = \
  cli_option("--transport-compression", dest="transport_compression",
             type="string", default=constants.IEC_NONE,
             help="The compression mode to use during transport")

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                                  dest="shutdown_timeout", type="int",
                                  default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                                  help="Maximum time to wait for instance"
                                  " shutdown")
1482
# Fix: typo "repetions" in the user-visible help text.
INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
                          default=None,
                          help=("Number of seconds between repetitions of the"
                                " command"))
1487
EARLY_RELEASE_OPT = cli_option("--early-release",
                               dest="early_release", default=False,
                               action="store_true",
                               help="Release the locks on the secondary"
                               " node(s) early")

# Certificate/key renewal options for "gnt-cluster renew-crypto".
NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")

NEW_NODE_CERT_OPT = cli_option(
  "--new-node-certificates", dest="new_node_cert", default=False,
  action="store_true", help="Generate new node certificates (for all nodes)")

RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
                           default=None,
                           help="File containing new RAPI certificate")

NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
                               default=None, action="store_true",
                               help=("Generate a new self-signed RAPI"
                                     " certificate"))

SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
                            default=None,
                            help="File containing new SPICE certificate")

SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
                              default=None,
                              help="File containing the certificate of the CA"
                              " which signed the SPICE certificate")

NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
                                dest="new_spice_cert", default=None,
                                action="store_true",
                                help=("Generate a new self-signed SPICE"
                                      " certificate"))

NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
                                    dest="new_confd_hmac_key",
                                    default=False, action="store_true",
                                    help=("Create a new HMAC key for %s" %
                                          constants.CONFD))
1532
# Fix: duplicated word ("new new") in the user-visible help text.
CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       help=("Load new cluster domain"
                                             " secret from file"))
1538
NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
                                           dest="new_cluster_domain_secret",
                                           default=False, action="store_true",
                                           help=("Create a new cluster domain"
                                                 " secret"))

USE_REPL_NET_OPT = cli_option("--use-replication-network",
                              dest="use_replication_network",
                              help="Whether to use the replication network"
                              " for talking to the nodes",
                              action="store_true", default=False)

MAINTAIN_NODE_HEALTH_OPT = \
  cli_option("--maintain-node-health", dest="maintain_node_health",
             metavar=_YORNO, default=None, type="bool",
             help="Configure the cluster to automatically maintain node"
             " health, by shutting down unknown instances, shutting down"
             " unknown DRBD devices, etc.")

IDENTIFY_DEFAULTS_OPT = \
  cli_option("--identify-defaults", dest="identify_defaults",
             default=False, action="store_true",
             help="Identify which saved instance parameters are equal to"
             " the current cluster defaults and set them as such, instead"
             " of marking them as overridden")

# User-id pool management options.
UIDPOOL_OPT = cli_option("--uid-pool", default=None,
                         action="store", dest="uid_pool",
                         help=("A list of user-ids or user-id"
                               " ranges separated by commas"))

ADD_UIDS_OPT = cli_option("--add-uids", default=None,
                          action="store", dest="add_uids",
                          help=("A list of user-ids or user-id"
                                " ranges separated by commas, to be"
                                " added to the user-id pool"))

REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
                             action="store", dest="remove_uids",
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))

RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                              action="store", dest="reserved_lvs",
                              help=("A comma-separated list of reserved"
                                    " logical volumes names, that will be"
                                    " ignored by cluster verify"))

ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use roman numbers for positive integers")

DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

PRIMARY_IP_VERSION_OPT = \
  cli_option("--primary-ip-version", default=constants.IP4_VERSION,
             action="store", dest="primary_ip_version",
             metavar="%d|%d" % (constants.IP4_VERSION,
                                constants.IP6_VERSION),
             help="Cluster-wide IP version for primary IP")

SHOW_MACHINE_OPT = cli_option("-M", "--show-machine-names", default=False,
                              action="store_true",
                              help="Show machine name for every line in output")

FAILURE_ONLY_OPT = cli_option("--failure-only", default=False,
                              action="store_true",
                              help=("Hide successful results and show failures"
                                    " only (determined by the exit code)"))

REASON_OPT = cli_option("--reason", default=None,
                        help="The reason for executing the command")
1615
1616
def _PriorityOptionCb(option, _, value, parser):
  """Callback for processing C{--priority} option.

  Translates the symbolic priority name given on the command line into
  its numeric value and stores it on the parsed options object.

  """
  setattr(parser.values, option.dest, _PRIONAME_TO_VALUE[value])
1624
1625
# Uses the _PriorityOptionCb callback above to translate the symbolic
# priority name into its numeric value.
PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          action="callback", type="choice",
                          callback=_PriorityOptionCb,
                          help="Priority for opcode processing")

OPPORTUNISTIC_OPT = cli_option("--opportunistic-locking",
                               dest="opportunistic_locking",
                               action="store_true", default=False,
                               help="Opportunistically acquire locks")

HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")

PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
                                     type="bool", metavar=_YORNO,
                                     dest="prealloc_wipe_disks",
                                     help=("Wipe disks prior to instance"
                                           " creation"))

NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
                             type="keyval", default=None,
                             help="Node parameters")

ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
                              action="store", metavar="POLICY", default=None,
                              help="Allocation policy for the node group")

NODE_POWERED_OPT = cli_option("--node-powered", default=None,
                              type="bool", metavar=_YORNO,
                              dest="node_powered",
                              help="Specify if the SoR for node is powered")

OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
                             default=constants.OOB_TIMEOUT,
                             help="Maximum time to wait for out-of-band helper")

POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
                             default=constants.OOB_POWER_DELAY,
                             help="Time in seconds to wait between power-ons")

FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
                              action="store_true", default=False,
                              help=("Whether command argument should be treated"
                                    " as filter"))

NO_REMEMBER_OPT = cli_option("--no-remember",
                             dest="no_remember",
                             action="store_true", default=False,
                             help="Perform but do not record the change"
                             " in the configuration")

PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
                              default=False, action="store_true",
                              help="Evacuate primary instances only")

SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
                                default=False, action="store_true",
                                help="Evacuate secondary instances only"
                                " (applies only to internally mirrored"
                                " disk templates, e.g. %s)" %
                                utils.CommaJoin(constants.DTS_INT_MIRROR))

STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
                                action="store_true", default=False,
                                help="Pause instance at startup")

TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
                          help="Destination node group (name or uuid)",
                          default=None, action="append",
                          completion_suggest=OPT_COMPL_ONE_NODEGROUP)

IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
                               action="append", dest="ignore_errors",
                               choices=list(constants.CV_ALL_ECODES_STRINGS),
                               help="Error code to be ignored")

DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
                            action="append",
                            help=("Specify disk state information in the"
                                  " format"
                                  " storage_type/identifier:option=value,...;"
                                  " note this is unused for now"),
                            type="identkeyval")

HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
                          action="append",
                          help=("Specify hypervisor state information in the"
                                " format hypervisor:option=value,...;"
                                " note this is unused for now"),
                          type="identkeyval")

IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
                                action="store_true", default=False,
                                help="Ignore instance policy violations")

RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
                             help="Sets the instance's runtime memory,"
                             " ballooning it up or down to the new value",
                             default=None, type="unit", metavar="<size>")

ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
                          action="store_true", default=False,
                          help="Marks the grow as absolute instead of the"
                          " (default) relative mode")

# Network (gnt-network) options.
NETWORK_OPT = cli_option("--network",
                         action="store", default=None, dest="network",
                         help="IP network in CIDR notation")

GATEWAY_OPT = cli_option("--gateway",
                         action="store", default=None, dest="gateway",
                         help="IP address of the router (gateway)")

ADD_RESERVED_IPS_OPT = cli_option("--add-reserved-ips",
                                  action="store", default=None,
                                  dest="add_reserved_ips",
                                  help="Comma-separated list of"
                                  " reserved IPs to add")

REMOVE_RESERVED_IPS_OPT = cli_option("--remove-reserved-ips",
                                     action="store", default=None,
                                     dest="remove_reserved_ips",
                                     help="Comma-delimited list of"
                                     " reserved IPs to remove")

NETWORK6_OPT = cli_option("--network6",
                          action="store", default=None, dest="network6",
                          help="IP network in CIDR notation")

GATEWAY6_OPT = cli_option("--gateway6",
                          action="store", default=None, dest="gateway6",
                          help="IP6 address of the router (gateway)")

NOCONFLICTSCHECK_OPT = cli_option("--no-conflicts-check",
                                  dest="conflicts_check",
                                  default=True,
                                  action="store_false",
                                  help="Don't check for conflicting IPs")

INCLUDEDEFAULTS_OPT = cli_option("--include-defaults", dest="include_defaults",
                                 default=False, action="store_true",
                                 help="Include default values")

HOTPLUG_OPT = cli_option("--hotplug", dest="hotplug",
                         action="store_true", default=False,
                         help="Hotplug supported devices (NICs and Disks)")

HOTPLUG_IF_POSSIBLE_OPT = cli_option("--hotplug-if-possible",
                                     dest="hotplug_if_possible",
                                     action="store_true", default=False,
                                     help="Hotplug devices in case"
                                     " hotplug is supported")

INSTALL_IMAGE_OPT = \
  cli_option("--install-image",
             dest="install_image",
             action="store",
             type="string",
             default=None,
             help="The OS image to use for running the OS scripts safely")

INSTANCE_COMMUNICATION_OPT = \
  cli_option("-c", "--communication",
             dest="instance_communication",
             help=constants.INSTANCE_COMMUNICATION_DOC,
             type="bool")

INSTANCE_COMMUNICATION_NETWORK_OPT = \
  cli_option("--instance-communication-network",
             dest="instance_communication_network",
             type="string",
             help="Set the network name for instance communication")

# Disk zeroing options used when exporting instances.
ZEROING_IMAGE_OPT = \
  cli_option("--zeroing-image",
             dest="zeroing_image", action="store", default=None,
             help="The OS image to use to zero instance disks")

ZERO_FREE_SPACE_OPT = \
  cli_option("--zero-free-space",
             dest="zero_free_space", action="store_true", default=False,
             help="Whether to zero the free space on the disks of the "
             "instance prior to the export")

HELPER_STARTUP_TIMEOUT_OPT = \
  cli_option("--helper-startup-timeout",
             dest="helper_startup_timeout", action="store", type="int",
             help="Startup timeout for the helper VM")

HELPER_SHUTDOWN_TIMEOUT_OPT = \
  cli_option("--helper-shutdown-timeout",
             dest="helper_shutdown_timeout", action="store", type="int",
             help="Shutdown timeout for the helper VM")

ZEROING_TIMEOUT_FIXED_OPT = \
  cli_option("--zeroing-timeout-fixed",
             dest="zeroing_timeout_fixed", action="store", type="int",
             help="The fixed amount of time to wait before assuming that the "
             "zeroing failed")

ZEROING_TIMEOUT_PER_MIB_OPT = \
  cli_option("--zeroing-timeout-per-mib",
             dest="zeroing_timeout_per_mib", action="store", type="float",
             help="The amount of time to wait per MiB of data to zero, in "
             "addition to the fixed timeout")

ENABLED_DATA_COLLECTORS_OPT = \
  cli_option("--enabled-data-collectors",
             dest="enabled_data_collectors", type="keyval",
             default={},
             help="Deactivate or reactivate a data collector for reporting, "
             "in the format collector=bool, where collector is one of %s."
             % ", ".join(constants.DATA_COLLECTOR_NAMES))


#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT, REASON_OPT]

# options related to asynchronous job handling

SUBMIT_OPTS = [
  SUBMIT_OPT,
  PRINT_JOBID_OPT,
  ]

# common options for creating instances. add and import then add their own
# specific ones.
COMMON_CREATE_OPTS = [
  BACKEND_OPT,
  DISK_OPT,
  DISK_TEMPLATE_OPT,
  FILESTORE_DIR_OPT,
  FILESTORE_DRIVER_OPT,
  HYPERVISOR_OPT,
  IALLOCATOR_OPT,
  NET_OPT,
  NODE_PLACEMENT_OPT,
  NODEGROUP_OPT,
  NOIPCHECK_OPT,
  NOCONFLICTSCHECK_OPT,
  NONAMECHECK_OPT,
  NONICS_OPT,
  NWSYNC_OPT,
  OSPARAMS_OPT,
  OSPARAMS_PRIVATE_OPT,
  OSPARAMS_SECRET_OPT,
  OS_SIZE_OPT,
  OPPORTUNISTIC_OPT,
  SUBMIT_OPT,
  PRINT_JOBID_OPT,
  TAG_ADD_OPT,
  DRY_RUN_OPT,
  PRIORITY_OPT,
  ]

# common instance policy options
INSTANCE_POLICY_OPTS = [
  IPOLICY_BOUNDS_SPECS_OPT,
  IPOLICY_DISK_TEMPLATES,
  IPOLICY_VCPU_RATIO,
  IPOLICY_SPINDLE_RATIO,
  ]

# instance policy split specs options
SPLIT_ISPECS_OPTS = [
  SPECS_CPU_COUNT_OPT,
  SPECS_DISK_COUNT_OPT,
  SPECS_DISK_SIZE_OPT,
  SPECS_MEM_SIZE_OPT,
  SPECS_NIC_COUNT_OPT,
  ]
1904
1905
1906 class _ShowUsage(Exception):
1907 """Exception class for L{_ParseArgs}.
1908
1909 """
1910 def __init__(self, exit_error):
1911 """Initializes instances of this class.
1912
1913 @type exit_error: bool
1914 @param exit_error: Whether to report failure on exit
1915
1916 """
1917 Exception.__init__(self)
1918 self.exit_error = exit_error
1919
1920
class _ShowVersion(Exception):
  """Exception class for L{_ParseArgs}.

  Raised when the program version should be printed.

  """
1925
1926
def _ParseArgs(binary, argv, commands, aliases, env_override):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param binary: Script name
  @param argv: Command line arguments
  @param commands: Dictionary containing command definitions
  @param aliases: dictionary with command aliases {"alias": "target", ...}
  @param env_override: list of env variables allowed for default args
  @raise _ShowUsage: If usage description should be shown
  @raise _ShowVersion: If version should be shown

  """
  assert not (env_override - set(commands))
  assert not (set(aliases.keys()) & set(commands.keys()))

  # With no arguments at all we can only show the usage text
  if len(argv) <= 1:
    raise _ShowUsage(exit_error=True)

  cmd = argv[1]

  if cmd == "--version":
    raise _ShowVersion()

  if cmd == "--help":
    raise _ShowUsage(exit_error=False)

  if cmd not in commands and cmd not in aliases:
    raise _ShowUsage(exit_error=True)

  # resolve aliases to the real command name
  if cmd in aliases:
    if aliases[cmd] not in commands:
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                   " command '%s'" % (cmd, aliases[cmd]))
    cmd = aliases[cmd]

  # allow default arguments to be injected through the environment
  if cmd in env_override:
    env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
    extra_args = os.environ.get(env_name)
    if extra_args:
      argv = utils.InsertAtPos(argv, 2, shlex.split(extra_args))

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args(args=argv[2:])

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args
1984
1985
1986 def _FormatUsage(binary, commands):
1987 """Generates a nice description of all commands.
1988
1989 @param binary: Script name
1990 @param commands: Dictionary containing command definitions
1991
1992 """
1993 # compute the max line length for cmd + usage
1994 mlen = min(60, max(map(len, commands)))
1995
1996 yield "Usage: %s {command} [options...] [argument...]" % binary
1997 yield "%s <command> --help to see details, or man %s" % (binary, binary)
1998 yield ""
1999 yield "Commands:"
2000
2001 # and format a nice command list
2002 for (cmd, (_, _, _, _, help_text)) in sorted(commands.items()):
2003 help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
2004 yield " %-*s - %s" % (mlen, cmd, help_lines.pop(0))
2005 for line in help_lines:
2006 yield " %-*s %s" % (mlen, "", line)
2007
2008 yield ""
2009
2010
2011 def _CheckArguments(cmd, args_def, args):
2012 """Verifies the arguments using the argument definition.
2013
2014 Algorithm:
2015
2016 1. Abort with error if values specified by user but none expected.
2017
2018 1. For each argument in definition
2019
2020 1. Keep running count of minimum number of values (min_count)
2021 1. Keep running count of maximum number of values (max_count)
2022 1. If it has an unlimited number of values
2023
2024 1. Abort with error if it's not the last argument in the definition
2025
2026 1. If last argument has limited number of values
2027
2028 1. Abort with error if number of values doesn't match or is too large
2029
2030 1. Abort with error if user didn't pass enough values (min_count)
2031
2032 """
2033 if args and not args_def:
2034 ToStderr("Error: Command %s expects no arguments", cmd)
2035 return False
2036
2037 min_count = None
2038 max_count = None
2039 check_max = None
2040
2041 last_idx = len(args_def) - 1
2042
2043 for idx, arg in enumerate(args_def):
2044 if min_count is None:
2045 min_count = arg.min
2046 elif arg.min is not None:
2047 min_count += arg.min
2048
2049 if max_count is None:
2050 max_count = arg.max
2051 elif arg.max is not None:
2052 max_count += arg.max
2053
2054 if idx == last_idx:
2055 check_max = (arg.max is not None)
2056
2057 elif arg.max is None:
2058 raise errors.ProgrammerError("Only the last argument can have max=None")
2059
2060 if check_max:
2061 # Command with exact number of arguments
2062 if (min_count is not None and max_count is not None and
2063 min_count == max_count and len(args) != min_count):
2064 ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
2065 return False
2066
2067 # Command with limited number of arguments
2068 if max_count is not None and len(args) > max_count:
2069 ToStderr("Error: Command %s expects only %d argument(s)",
2070 cmd, max_count)
2071 return False
2072
2073 # Command with some required arguments
2074 if min_count is not None and len(args) < min_count:
2075 ToStderr("Error: Command %s expects at least %d argument(s)",
2076 cmd, min_count)
2077 return False
2078
2079 return True
2080
2081
def SplitNodeOption(value):
  """Splits the value of a --node option.

  Returns a pair; if no ":" separator is present the second element
  is C{None}.

  """
  if not value or ":" not in value:
    return (value, None)
  return value.split(":", 1)
2090
2091
def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  if not os_variants:
    # No variants: the plain OS name is the only valid name
    return [os_name]
  return ["%s+%s" % (os_name, variant) for variant in os_variants]
2107
2108
def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields
  @rtype: list
  @return: the default fields, the user-selected fields, or the default
      fields extended by the user-selected ones (when prefixed with "+")

  """
  if selected is None:
    return default

  if selected[0:1] == "+":
    # "+a,b" means: the default fields plus "a" and "b"
    return default + selected[1:].split(",")

  return selected.split(",")
2125
2126
# Convenience alias; see rpc.RunWithRPC for the actual semantics
UsesRPC = rpc.RunWithRPC
2128
2129
def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask

  @param choices: list with elements tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do not do the operation')];
      note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty), we return the last
      entry from the list

  """
  if choices is None:
    choices = [("y", True, "Perform the operation"),
               ("n", False, "Do not perform the operation")]
  # Validate the choices structure: non-empty list of (char, value,
  # description) tuples, '?' being reserved for the help text
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  # Default answer: value of the last choice (also used when no tty)
  answer = choices[-1][1]
  new_text = []
  for line in text.splitlines():
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)
  try:
    # Python 2 "file" builtin; talk directly to the controlling terminal
    f = file("/dev/tty", "a+")
  except IOError:
    # No controlling terminal available, fall back to the default answer
    return answer
  try:
    chars = [entry[0] for entry in choices]
    chars[-1] = "[%s]" % chars[-1]  # mark the default choice
    chars.append("?")
    # Map from input character to return value
    maps = dict([(entry[0], entry[1]) for entry in choices])
    while True:
      f.write(text)
      f.write("\n")
      f.write("/".join(chars))
      f.write(": ")
      # Read at most two characters: the answer character plus newline
      line = f.readline(2).strip().lower()
      if line in maps:
        answer = maps[line]
        break
      elif line == "?":
        # Show the description of each choice, then ask again
        for entry in choices:
          f.write(" %s - %s\n" % (entry[0], entry[2]))
        f.write("\n")
        continue
  finally:
    f.close()
  return answer
2185
2186
class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  This exception has one argument, the ID of the job that was
  submitted. The handler should print this ID.

  This is not an error, just a structured way to exit from clients.

  """
2196
2197
def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.

  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created
  @return: the ID of the submitted job

  """
  if cl is None:
    cl = GetClient()

  return cl.SubmitJob(ops)
2214
2215
def GenericPollJob(job_id, cbs, report_cbs):
  """Generic job-polling function.

  @type job_id: number
  @param job_id: Job ID
  @type cbs: Instance of L{JobPollCbBase}
  @param cbs: Data callbacks
  @type report_cbs: Instance of L{JobPollReportCbBase}
  @param report_cbs: Reporting callbacks
  @return: the opresult list of the job if it was successful
  @raise errors.JobLost: If the job can no longer be found
  @raise errors.OpExecError: If the job was canceled or an opcode failed

  """
  prev_job_info = None
  prev_logmsg_serial = None

  status = None

  while True:
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
                                      prev_logmsg_serial)
    if not result:
      # job not found, go away!
      raise errors.JobLost("Job with id %s lost" % job_id)

    if result == constants.JOB_NOTCHANGED:
      report_cbs.ReportNotChanged(job_id, status)

      # Wait again
      continue

    # Split result, a tuple of (field values, log entries)
    (job_info, log_entries) = result
    (status, ) = job_info

    if log_entries:
      for log_entry in log_entries:
        (serial, timestamp, log_type, message) = log_entry
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
                                    log_type, message)
        # NOTE: relies on Python 2 semantics where max(None, x) == x;
        # prev_logmsg_serial starts out as None
        prev_logmsg_serial = max(prev_logmsg_serial, serial)

    # TODO: Handle canceled and archived jobs
    # NOTE: this is an "elif": the loop only terminates on a poll that
    # returned a terminal status AND no new log entries in the same batch
    elif status in (constants.JOB_STATUS_SUCCESS,
                    constants.JOB_STATUS_ERROR,
                    constants.JOB_STATUS_CANCELING,
                    constants.JOB_STATUS_CANCELED):
      break

    prev_job_info = job_info

  # The job reached a terminal state; fetch the per-opcode results
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
  if not jobs:
    raise errors.JobLost("Job with id %s lost" % job_id)

  status, opstatus, result = jobs[0]

  if status == constants.JOB_STATUS_SUCCESS:
    return result

  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
    raise errors.OpExecError("Job was canceled")

  # Job failed: find the first failed opcode and raise accordingly,
  # noting whether earlier opcodes had already succeeded
  has_ok = False
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
    if status == constants.OP_STATUS_SUCCESS:
      has_ok = True
    elif status == constants.OP_STATUS_ERROR:
      errors.MaybeRaise(msg)

      if has_ok:
        raise errors.OpExecError("partial failure (opcode %d): %s" %
                                 (idx, msg))

      raise errors.OpExecError(str(msg))

  # default failure mode
  raise errors.OpExecError(result)
2292
2293
class JobPollCbBase(object):
  """Base class for L{GenericPollJob} callbacks.

  Subclasses must implement the two data-retrieval methods below.

  """
  def __init__(self):
    """Initializes this class.

    """

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    raise NotImplementedError()

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields

    """
    raise NotImplementedError()
2320
2321
class JobPollReportCbBase(object):
  """Base class for L{GenericPollJob} reporting callbacks.

  Subclasses must implement the two reporting methods below.

  """
  def __init__(self):
    """Initializes this class.

    """

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    raise NotImplementedError()

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    """
    raise NotImplementedError()
2347
2348
class _LuxiJobPollCb(JobPollCbBase):
  """L{JobPollCbBase} implementation delegating all calls to a client.

  """
  def __init__(self, cl):
    """Initializes this class.

    @param cl: the client used for all queries (see L{PollJob})

    """
    JobPollCbBase.__init__(self)
    self.cl = cl

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    return self.cl.WaitForJobChangeOnce(job_id, fields,
                                        prev_job_info, prev_log_serial)

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    """
    return self.cl.QueryJobs(job_ids, fields)
2370
2371
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
  """Reporting callbacks passing log messages to a feedback function.

  """
  def __init__(self, feedback_fn):
    """Initializes this class.

    @param feedback_fn: callable invoked with (timestamp, log_type,
        log_msg) tuples for every log message

    """
    JobPollReportCbBase.__init__(self)

    self.feedback_fn = feedback_fn

    assert callable(feedback_fn)

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    self.feedback_fn((timestamp, log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    # Ignore
2394
2395
class StdioJobPollReportCb(JobPollReportCbBase):
  """Reporting callbacks printing to standard output/error.

  """
  def __init__(self):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    # Flags ensuring each "waiting" notice is printed at most once
    self.notified_queued = False
    self.notified_waitlock = False

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
             FormatLogMessage(log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    if status is None:
      return

    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
      ToStderr("Job %s is waiting in queue", job_id)
      self.notified_queued = True

    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
      self.notified_waitlock = True
2427
2428
def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  """
  # Non-ELOG_MESSAGE payloads are stringified before encoding
  if log_type == constants.ELOG_MESSAGE:
    text = log_msg
  else:
    text = str(log_msg)

  return utils.SafeEncode(text)
2437
2438
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
  """Function to poll for the result of a job.

  @type job_id: job identified
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created

  """
  if cl is None:
    cl = GetClient()

  # Exactly one of "reporter" and "feedback_fn" may be given; without
  # either, report to stdio
  if reporter is not None:
    if feedback_fn:
      raise errors.ProgrammerError("Can't specify reporter and feedback"
                                   " function")
  elif feedback_fn:
    reporter = FeedbackFnJobPollReportCb(feedback_fn)
  else:
    reporter = StdioJobPollReportCb()

  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
2461
2462
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
  """Legacy function to submit an opcode.

  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.

  @return: the result of the first (and only) opcode of the job

  """
  if cl is None:
    cl = GetClient()

  SetGenericOpcodeOpts([op], opts)

  job_id = SendJob([op], cl=cl)
  if getattr(opts, "print_jobid", False):
    ToStdout("%d" % job_id)

  results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
                    reporter=reporter)

  return results[0]
2484
2485
def SubmitOpCodeToDrainedQueue(op):
  """Forcefully insert a job in the queue, even if it is drained.

  @return: the result of the first (and only) opcode of the job

  """
  client = GetClient()
  job_id = client.SubmitJobToDrainedQueue([op])
  return PollJob(job_id, cl=client)[0]
2494
2495
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending them via SendJob
  (otherwise SubmitOpCode does it).

  """
  if not (opts and opts.submit_only):
    # Normal path: submit the job and wait for its result
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)

  # "--submit" was given: send the job without waiting and report its
  # ID through an exception
  job = [op]
  SetGenericOpcodeOpts(job, opts)
  job_id = SendJob(job, cl=cl)
  if opts.print_jobid:
    ToStdout("%d" % job_id)
  raise JobSubmittedException(job_id)
2517
2518
def _InitReasonTrail(op, opts):
  """Builds the first part of the reason trail

  Builds the initial part of the reason trail, adding the user provided reason
  (if it exists) and the name of the command starting the operation.

  @param op: the opcode the reason trail will be added to
  @param opts: the command line options selected by the user

  """
  assert len(sys.argv) >= 2

  trail = []
  if opts.reason:
    # User-supplied reason comes first
    trail.append((constants.OPCODE_REASON_SRC_USER,
                  opts.reason,
                  utils.EpochNano()))

  # Followed by the invoking client binary and sub-command
  binary = os.path.basename(sys.argv[0])
  source = "%s:%s" % (constants.OPCODE_REASON_SRC_CLIENT, binary)
  trail.append((source, sys.argv[1], utils.EpochNano()))

  op.reason = trail
2542
2543
def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  if not options:
    return

  for opcode in opcode_list:
    opcode.debug_level = options.debug
    if hasattr(options, "dry_run"):
      opcode.dry_run = options.dry_run
    priority = getattr(options, "priority", None)
    if priority is not None:
      opcode.priority = priority
    _InitReasonTrail(opcode, options)
2564
2565
def FormatError(err):
  """Return a formatted error message for a given error.

  This function takes an exception instance and returns a tuple
  consisting of two values: first, the recommended exit code, and
  second, a string describing the error message (not
  newline-terminated).

  """
  # NOTE: order matters below; subclasses must be tested before
  # errors.GenericError, which acts as a catch-all for Ganeti errors
  retcode = 1
  obuf = StringIO()
  msg = str(err)
  if isinstance(err, errors.ConfigurationError):
    txt = "Corrupt configuration file: %s" % msg
    logging.error(txt)
    obuf.write(txt + "\n")
    obuf.write("Aborting.")
    # Configuration errors get a distinct exit code
    retcode = 2
  elif isinstance(err, errors.HooksAbort):
    obuf.write("Failure: hooks execution failed:\n")
    for node, script, out in err.args[0]:
      if out:
        obuf.write(" node: %s, script: %s, output: %s\n" %
                   (node, script, out))
      else:
        obuf.write(" node: %s, script: %s (no output)\n" %
                   (node, script))
  elif isinstance(err, errors.HooksFailure):
    obuf.write("Failure: hooks general failure: %s" % msg)
  elif isinstance(err, errors.ResolverError):
    this_host = netutils.Hostname.GetSysName()
    if err.args[0] == this_host:
      msg = "Failure: can't resolve my own hostname ('%s')"
    else:
      msg = "Failure: can't resolve hostname '%s'"
    obuf.write(msg % err.args[0])
  elif isinstance(err, errors.OpPrereqError):
    # With two arguments the second one is the error code
    if len(err.args) == 2:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\nerror type: %s, error details:\n%s" %
                 (err.args[1], err.args[0]))
    else:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\n%s" % msg)
  elif isinstance(err, errors.OpExecError):
    obuf.write("Failure: command execution error:\n%s" % msg)
  elif isinstance(err, errors.TagError):
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
  elif isinstance(err, errors.JobQueueDrainError):
    obuf.write("Failure: the job queue is marked for drain and doesn't"
               " accept new requests\n")
  elif isinstance(err, errors.JobQueueFull):
    obuf.write("Failure: the job queue is full and doesn't accept new"
               " job submissions until old jobs are archived\n")
  elif isinstance(err, errors.TypeEnforcementError):
    obuf.write("Parameter Error: %s" % msg)
  elif isinstance(err, errors.ParameterError):
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
  elif isinstance(err, rpcerr.NoMasterError):
    # Identify which daemon we failed to reach by its socket path
    if err.args[0] == pathutils.MASTER_SOCKET:
      daemon = "the master daemon"
    elif err.args[0] == pathutils.QUERY_SOCKET:
      daemon = "the config daemon"
    else:
      daemon = "socket '%s'" % str(err.args[0])
    obuf.write("Cannot communicate with %s.\nIs the process running"
               " and listening for connections?" % daemon)
  elif isinstance(err, rpcerr.TimeoutError):
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
               " been submitted and will continue to run even if the call"
               " timed out. Useful commands in this situation are \"gnt-job"
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
    obuf.write(msg)
  elif isinstance(err, rpcerr.PermissionError):
    obuf.write("It seems you don't have permissions to connect to the"
               " master daemon.\nPlease retry as a different user.")
  elif isinstance(err, rpcerr.ProtocolError):
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
               "%s" % msg)
  elif isinstance(err, errors.JobLost):
    obuf.write("Error checking job status: %s" % msg)
  elif isinstance(err, errors.QueryFilterParseError):
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
    obuf.write("\n".join(err.GetDetails()))
  elif isinstance(err, errors.GenericError):
    obuf.write("Unhandled Ganeti error: %s" % msg)
  elif isinstance(err, JobSubmittedException):
    # Not an error: the job was sent, report its ID and exit cleanly
    obuf.write("JobID: %s\n" % err.args[0])
    retcode = 0
  else:
    obuf.write("Unhandled exception: %s" % msg)
  return retcode, obuf.getvalue().rstrip("\n")
2658
2659
2660 def GenericMain(commands, override=None, aliases=None,
2661 env_override=frozenset()):
2662 """Generic main function for all the gnt-* commands.
2663
2664 @param commands: a dictionary with a special structure, see the design doc
2665 for command line handling.
2666 @param override: if not None, we expect a dictionary with keys that will
2667 override command line options; this can be used to pass
2668 options from the scripts to generic functions
2669 @param aliases: dictionary with command aliases {'alias': 'target, ...}
2670 @param env_override: list of environment names which are allowed to submit
2671 default args for commands
2672
2673 """
2674 # save the program name and the entire command line for later logging
2675 if sys.argv:
2676 binary = os.path.basename(sys.argv[0])
2677 if not binary:
2678 binary = sys.argv[0]
2679
2680 if len(sys.argv) >= 2:
2681 logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
2682 else:
2683 logname = binary
2684
2685 cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
2686 else:
2687 binary = "<unknown program>"
2688 cmdline = "<unknown>"
2689
2690 if aliases is None:
2691 aliases = {}
2692
2693 try:
2694 (func, options, args) = _ParseArgs(binary, sys.argv, commands, aliases,
2695 env_override)
2696 except _ShowVersion:
2697 ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
2698 constants.RELEASE_VERSION)
2699 return constants.EXIT_SUCCESS
2700 except _ShowUsage, err:
2701 for line in _FormatUsage(binary, commands):
2702 ToStdout(line)
2703
2704 if err.exit_error:
2705 return constants.EXIT_FAILURE
2706 else:
2707 return constants.EXIT_SUCCESS
2708 except errors.ParameterError, err:
2709 result, err_msg = FormatError(err)
2710 ToStderr(err_msg)
2711 return 1
2712
2713 if func is None: # parse error
2714 return 1
2715
2716 if override is not None:
2717 for key, val in override.iteritems():
2718 setattr(options, key, val)
2719
2720 utils.SetupLogging(pathutils.LOG_COMMANDS, logname, debug=options.debug,
2721 stderr_logging=True)
2722
2723 logging.debug("Command line: %s", cmdline)
2724
2725 try:
2726 result = func(options, args)
2727 except (errors.GenericError, rpcerr.ProtocolError,
2728 JobSubmittedException), err:
2729 result, err_msg = FormatError(err)
2730 logging.exception("Error during command processing")
2731 ToStderr(err_msg)
2732 except KeyboardInterrupt:
2733 result = constants.EXIT_FAILURE
2734 ToStderr("Aborted. Note that if the operation created any jobs, they"
2735 " might have been submitted and"
2736 " will continue to run in the background.")
2737 except IOError, err:
2738 if err.errno == errno.EPIPE:
2739 # our terminal went away, we'll exit
2740 sys.exit(constants.EXIT_FAILURE)
2741 else:
2742 raise
2743
2744 return result
2745
2746
def ParseNicOption(optvalue):
  """Parses the value of the --net option(s).

  @param optvalue: list of (index, settings-dict) pairs as collected from
      the command line
  @rtype: list of dicts
  @return: a list of NIC parameter dicts, indexed by NIC index; indices
      not mentioned in C{optvalue} get an empty dict
  @raise errors.OpPrereqError: if an index or a settings value is invalid

  """
  try:
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
  except (TypeError, ValueError) as err:
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err),
                               errors.ECODE_INVAL)

  # FIX: use independent dicts; "[{}] * nic_max" would make every
  # unspecified slot alias ONE shared dict, so a later mutation of one
  # entry would silently change all of them
  nics = [{} for _ in range(nic_max)]
  for nidx, ndict in optvalue:
    nidx = int(nidx)

    if not isinstance(ndict, dict):
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
                                 " got %s" % (nidx, ndict), errors.ECODE_INVAL)

    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)

    nics[nidx] = ndict

  return nics
2770
2771
def FixHvParams(hvparams):
  """Normalizes the usb_devices hvparam separator in place.

  In Ganeti 2.8.4 the separator for the usb_devices hvparam was changed from
  comma to space because commas cannot be accepted on the command line
  (they already act as the separator between different hvparams). Still,
  RAPI should be able to accept commas for backwards compatibility.
  Therefore, we convert spaces into commas here, and we keep the old
  parsing logic everywhere else.

  """
  if constants.HV_USB_DEVICES in hvparams:
    hvparams[constants.HV_USB_DEVICES] = \
      hvparams[constants.HV_USB_DEVICES].replace(" ", ",")
2785
2786
def GenericInstanceCreate(mode, opts, args):
  """Add an instance to the cluster via either creation or import.

  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the new instance name
  @rtype: int
  @return: the desired exit code
  @raise errors.OpPrereqError: on inconsistent disk/NIC specifications
  @raise errors.ProgrammerError: if C{mode} is not one of the two
      supported creation modes

  """
  instance = args[0]

  (pnode, snode) = SplitNodeOption(opts.node)

  hypervisor = None
  hvparams = {}
  if opts.hypervisor:
    hypervisor, hvparams = opts.hypervisor

  # Build the NIC list: explicit options, none at all, or one default NIC
  if opts.nics:
    nics = ParseNicOption(opts.nics)
  elif opts.no_nics:
    # no nics
    nics = []
  elif mode == constants.INSTANCE_CREATE:
    # default of one nic, all auto
    nics = [{}]
  else:
    # mode == import
    nics = []

  # Build the disk list; "-s" (sd_size) is a shortcut for a single disk 0
  if opts.disk_template == constants.DT_DISKLESS:
    if opts.disks or opts.sd_size is not None:
      raise errors.OpPrereqError("Diskless instance but disk"
                                 " information passed", errors.ECODE_INVAL)
    disks = []
  else:
    if (not opts.disks and not opts.sd_size
        and mode == constants.INSTANCE_CREATE):
      raise errors.OpPrereqError("No disk information specified",
                                 errors.ECODE_INVAL)
    if opts.disks and opts.sd_size is not None:
      raise errors.OpPrereqError("Please use either the '--disk' or"
                                 " '-s' option", errors.ECODE_INVAL)
    if opts.sd_size is not None:
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]

    if opts.disks:
      try:
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
      except ValueError as err:
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err),
                                   errors.ECODE_INVAL)
      # FIX: use independent dicts; "[{}] * disk_max" would make every
      # unspecified slot alias ONE shared dict, so a later mutation of
      # one entry would silently change all of them
      disks = [{} for _ in range(disk_max)]
    else:
      disks = []
    for didx, ddict in opts.disks:
      didx = int(didx)
      if not isinstance(ddict, dict):
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
      elif constants.IDISK_SIZE in ddict:
        # "size" and "adopt" are mutually exclusive
        if constants.IDISK_ADOPT in ddict:
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
                                     " (disk %d)" % didx, errors.ECODE_INVAL)
        try:
          ddict[constants.IDISK_SIZE] = \
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
        except ValueError as err:
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
                                     (didx, err), errors.ECODE_INVAL)
      elif constants.IDISK_ADOPT in ddict:
        if constants.IDISK_SPINDLES in ddict:
          raise errors.OpPrereqError("spindles is not a valid option when"
                                     " adopting a disk", errors.ECODE_INVAL)
        if mode == constants.INSTANCE_IMPORT:
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
                                     " import", errors.ECODE_INVAL)
        ddict[constants.IDISK_SIZE] = 0
      else:
        raise errors.OpPrereqError("Missing size or adoption source for"
                                   " disk %d" % didx, errors.ECODE_INVAL)
      if constants.IDISK_SPINDLES in ddict:
        ddict[constants.IDISK_SPINDLES] = int(ddict[constants.IDISK_SPINDLES])

      disks[didx] = ddict

  if opts.tags is not None:
    tags = opts.tags.split(",")
  else:
    tags = []

  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
  FixHvParams(hvparams)

  osparams_private = opts.osparams_private or serializer.PrivateDict()
  osparams_secret = opts.osparams_secret or serializer.PrivateDict()

  helper_startup_timeout = opts.helper_startup_timeout
  helper_shutdown_timeout = opts.helper_shutdown_timeout

  # Mode-specific settings: create vs. import
  if mode == constants.INSTANCE_CREATE:
    start = opts.start
    os_type = opts.os
    force_variant = opts.force_variant
    src_node = None
    src_path = None
    no_install = opts.no_install
    identify_defaults = False
    compress = constants.IEC_NONE
    if opts.instance_communication is None:
      instance_communication = False
    else:
      instance_communication = opts.instance_communication
  elif mode == constants.INSTANCE_IMPORT:
    start = False
    os_type = None
    force_variant = False
    src_node = opts.src_node
    src_path = opts.src_dir
    no_install = None
    identify_defaults = opts.identify_defaults
    compress = opts.compress
    instance_communication = False
  else:
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)

  op = opcodes.OpInstanceCreate(
    instance_name=instance,
    disks=disks,
    disk_template=opts.disk_template,
    group_name=opts.nodegroup,
    nics=nics,
    conflicts_check=opts.conflicts_check,
    pnode=pnode, snode=snode,
    ip_check=opts.ip_check,
    name_check=opts.name_check,
    wait_for_sync=opts.wait_for_sync,
    file_storage_dir=opts.file_storage_dir,
    file_driver=opts.file_driver,
    iallocator=opts.iallocator,
    hypervisor=hypervisor,
    hvparams=hvparams,
    beparams=opts.beparams,
    osparams=opts.osparams,
    osparams_private=osparams_private,
    osparams_secret=osparams_secret,
    mode=mode,
    opportunistic_locking=opts.opportunistic_locking,
    start=start,
    os_type=os_type,
    force_variant=force_variant,
    src_node=src_node,
    src_path=src_path,
    compress=compress,
    tags=tags,
    no_install=no_install,
    identify_defaults=identify_defaults,
    ignore_ipolicy=opts.ignore_ipolicy,
    instance_communication=instance_communication,
    helper_startup_timeout=helper_startup_timeout,
    helper_shutdown_timeout=helper_shutdown_timeout)

  SubmitOrSend(op, opts)
  return 0
2954
2955
class _RunWhileClusterStoppedHelper(object):
  """Helper class for L{RunWhileClusterStopped} to simplify state management

  """
  def __init__(self, feedback_fn, cluster_name, master_node,
               online_nodes, ssh_ports):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: Feedback function
    @type cluster_name: string
    @param cluster_name: Cluster name
    @type master_node: string
    @param master_node: Master node name
    @type online_nodes: list
    @param online_nodes: List of names of online nodes
    @type ssh_ports: list
    @param ssh_ports: List of SSH ports of online nodes, in the same order
        as C{online_nodes}

    """
    self.feedback_fn = feedback_fn
    self.cluster_name = cluster_name
    self.master_node = master_node
    self.online_nodes = online_nodes
    # Map node name -> SSH port, used for running commands remotely
    self.ssh_ports = dict(zip(online_nodes, ssh_ports))

    self.ssh = ssh.SshRunner(self.cluster_name)

    # All online nodes except the master; daemons are restarted on these
    # first and on the master node last (see L{Call})
    self.nonmaster_nodes = [name for name in online_nodes
                            if name != master_node]

    assert self.master_node not in self.nonmaster_nodes

  def _RunCmd(self, node_name, cmd):
    """Runs a command on the local or a remote machine.

    @type node_name: string
    @param node_name: Machine name; if C{None} or equal to the master node,
        the command is run locally instead of over SSH
    @type cmd: list
    @param cmd: Command
    @raise errors.OpExecError: if the command exits with a non-zero status

    """
    if node_name is None or node_name == self.master_node:
      # No need to use SSH
      result = utils.RunCmd(cmd)
    else:
      result = self.ssh.Run(node_name, constants.SSH_LOGIN_USER,
                            utils.ShellQuoteArgs(cmd),
                            port=self.ssh_ports[node_name])

    if result.failed:
      errmsg = ["Failed to run command %s" % result.cmd]
      if node_name:
        errmsg.append("on node %s" % node_name)
      errmsg.append(": exitcode %s and error %s" %
                    (result.exit_code, result.output))
      raise errors.OpExecError(" ".join(errmsg))

  def Call(self, fn, *args):
    """Call function while all daemons are stopped.

    @type fn: callable
    @param fn: Function to be called; receives this helper instance as its
        first argument, followed by C{args}
    @return: the return value of C{fn}

    """
    # Pause watcher by acquiring an exclusive lock on watcher state file
    self.feedback_fn("Blocking watcher")
    watcher_block = utils.FileLock.Open(pathutils.WATCHER_LOCK_FILE)
    try:
      # TODO: Currently, this just blocks. There's no timeout.
      # TODO: Should it be a shared lock?
      watcher_block.Exclusive(blocking=True)

      # Stop master daemons, so that no new jobs can come in and all running
      # ones are finished
      self.feedback_fn("Stopping master daemons")
      self._RunCmd(None, [pathutils.DAEMON_UTIL, "stop-master"])
      try:
        # Stop daemons on all nodes
        for node_name in self.online_nodes:
          self.feedback_fn("Stopping daemons on %s" % node_name)
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop-all"])

        # All daemons are shut down now
        try:
          return fn(self, *args)
        except Exception, err:
          # Show the error via the feedback function before re-raising, so
          # it is visible to the caller even while daemons are down
          _, errmsg = FormatError(err)
          logging.exception("Caught exception")
          self.feedback_fn(errmsg)
          raise
      finally:
        # Start cluster again, master node last
        for node_name in self.nonmaster_nodes + [self.master_node]:
          self.feedback_fn("Starting daemons on %s" % node_name)
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start-all"])
    finally:
      # Resume watcher
      watcher_block.Close()
3055
3056
def RunWhileClusterStopped(feedback_fn, fn, *args):
  """Calls a function while all cluster daemons are stopped.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @type fn: callable
  @param fn: Function to be called when daemons are stopped
  @return: the return value of C{fn}

  """
  feedback_fn("Gathering cluster information")

  # Talking to the master daemon also ensures we're running on it
  client = GetClient()

  (name, master) = client.QueryConfigValues(["cluster_name", "master_node"])

  nodes = GetOnlineNodes([], cl=client)
  ports = GetNodesSshPorts(nodes, client)

  # Drop the client reference; the master daemon is about to go away
  del client

  assert master in nodes

  helper = _RunWhileClusterStoppedHelper(feedback_fn, name, master,
                                         nodes, ports)
  return helper.Call(fn, *args)
3084
3085
def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
                  units=None):
  """Prints a table with headers and different fields.

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the table
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data field
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output;
      NOTE: the sublists are modified in place (values converted to
      strings, unit fields formatted)
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string
  @rtype: list of strings
  @return: the formatted table lines

  """
  if units is None:
    if separator:
      units = "m"
    else:
      units = "h"

  if numfields is None:
    numfields = []
  if unitfields is None:
    unitfields = []

  # FieldSet supports wildcard matching of field names
  numfields = utils.FieldSet(*numfields) # pylint: disable=W0142
  unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142

  format_fields = []
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # variable fields)
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      # "%*s" takes the width as an extra argument, filled in later
      format_fields.append("%*s")
    else:
      format_fields.append("%-*s")

  if separator is None:
    # mlens tracks the maximum width seen for each column
    mlens = [0 for name in fields]
    format_str = " ".join(format_fields)
  else:
    # Escape "%" in the separator so it survives the later "%"-formatting
    format_str = separator.replace("%", "%%").join(format_fields)

  # First pass: stringify all values in place and record column widths
  for row in data:
    if row is None:
      continue
    for idx, val in enumerate(row):
      if unitfields.Matches(fields[idx]):
        try:
          val = int(val)
        except (TypeError, ValueError):
          # Non-numeric value in a unit field is kept as-is
          pass
        else:
          val = row[idx] = utils.FormatUnit(val, units)
      val = row[idx] = str(val)
      if separator is None:
        mlens[idx] = max(mlens[idx], len(val))

  result = []
  if headers:
    args = []
    for idx, name in enumerate(fields):
      hdr = headers[name]
      if separator is None:
        # Headers may be wider than any value in their column
        mlens[idx] = max(mlens[idx], len(hdr))
        args.append(mlens[idx])
      args.append(hdr)
    result.append(format_str % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)

    # Avoid trailing whitespace after the last (left-aligned) column
    if fields and not numfields.Matches(fields[-1]):
      mlens[-1] = 0

  # Second pass: render each (now stringified) row
  for line in data:
    args = []
    if line is None:
      line = ["-" for _ in fields]
    for idx in range(len(fields)):
      if separator is None:
        args.append(mlens[idx])
      args.append(line[idx])
    result.append(format_str % tuple(args))

  return result
3192
3193
3194 def _FormatBool(value):
3195 """Formats a boolean value as a string.
3196
3197 """
3198 if value:
3199 return "Y"
3200 return "N"
3201
3202
#: Default formatting for query results; maps field type to a tuple of
#: (formatting callback, align right); see L{_GetColumnFormatter}
_DEFAULT_FORMAT_QUERY = {
  constants.QFT_TEXT: (str, False),
  constants.QFT_BOOL: (_FormatBool, False),
  # Numeric fields are right-aligned
  constants.QFT_NUMBER: (str, True),
  constants.QFT_NUMBER_FLOAT: (str, True),
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
  constants.QFT_OTHER: (str, False),
  constants.QFT_UNKNOWN: (str, False),
  }
3213
3214
def _GetColumnFormatter(fdef, override, unit):
  """Returns formatting function for a field.

  @type fdef: L{objects.QueryFieldDefinition}
  @type override: dict
  @param override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
  @rtype: tuple; (callable, bool)
  @return: Returns the function to format a value (takes one parameter) and a
    boolean for aligning the value on the right-hand side

  """
  # Caller-supplied overrides take precedence over everything else
  custom = override.get(fdef.name, None)
  if custom is not None:
    return custom

  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY

  if fdef.kind == constants.QFT_UNIT:
    # The unit is only known at runtime, hence not in the static dictionary
    return (lambda value: utils.FormatUnit(value, unit), True)

  try:
    return _DEFAULT_FORMAT_QUERY[fdef.kind]
  except KeyError:
    raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
3244
3245
class _QueryColumnFormatter(object):
  """Callable class for formatting fields of a query.

  """
  def __init__(self, fn, status_fn, verbose):
    """Initializes this class.

    @type fn: callable
    @param fn: Formatting function
    @type status_fn: callable
    @param status_fn: Function to report fields' status
    @type verbose: boolean
    @param verbose: whether to use verbose field descriptions or not

    """
    self._fn = fn
    self._status_fn = status_fn
    self._verbose = verbose

  def __call__(self, data):
    """Returns a field's string representation.

    """
    (status, value) = data

    # Let the caller keep statistics about result statuses
    self._status_fn(status)

    if status != constants.RS_NORMAL:
      assert value is None, \
             "Found value %r for abnormal status %s" % (value, status)
      return FormatResultError(status, self._verbose)

    return self._fn(value)
3281
3282
def FormatResultError(status, verbose):
  """Formats result status other than L{constants.RS_NORMAL}.

  @param status: The result status
  @type verbose: boolean
  @param verbose: Whether to return the verbose text
  @return: Text of result status

  """
  assert status != constants.RS_NORMAL, \
         "FormatResultError called with status equal to constants.RS_NORMAL"
  try:
    descriptions = constants.RSS_DESCRIPTION[status]
  except KeyError:
    raise NotImplementedError("Unknown status %s" % status)

  (verbose_text, normal_text) = descriptions
  return verbose_text if verbose else normal_text
3302
3303
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
                      header=False, verbose=False):
  """Formats data in L{objects.QueryResponse}.

  @type result: L{objects.QueryResponse}
  @param result: result of query operation
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
    see L{utils.text.FormatUnit}
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to output header row
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not
  @rtype: tuple; (one of L{QR_NORMAL}, L{QR_UNKNOWN} or L{QR_INCOMPLETE},
    list of formatted lines)

  """
  if unit is None:
    unit = "m" if separator else "h"

  if format_override is None:
    format_override = {}

  # Per-status counters, updated while each field value is being formatted
  stats = dict.fromkeys(constants.RS_ALL, 0)

  def _RecordStatus(status):
    if status in stats:
      stats[status] += 1

  columns = []
  for fdef in result.fields:
    assert fdef.title and fdef.name
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
    formatter = _QueryColumnFormatter(fn, _RecordStatus, verbose)
    columns.append(TableColumn(fdef.title, formatter, align_right))

  table = FormatTable(result.data, columns, header, separator)

  # Collect statistics
  assert len(stats) == len(constants.RS_ALL)
  assert compat.all(count >= 0 for count in stats.values())

  # Determine overall status. If there was no data, unknown fields must be
  # detected via the field definitions.
  if (stats[constants.RS_UNKNOWN] or
      (not result.data and _GetUnknownFields(result.fields))):
    status = QR_UNKNOWN
  elif compat.any(count > 0 for key, count in stats.items()
                  if key != constants.RS_NORMAL):
    status = QR_INCOMPLETE
  else:
    status = QR_NORMAL

  return (status, table)
3366
3367
3368 def _GetUnknownFields(fdefs):
3369 """Returns list of unknown fields included in C{fdefs}.
3370
3371 @type fdefs: list of L{objects.QueryFieldDefinition}
3372
3373 """
3374 return [fdef for fdef in fdefs
3375 if fdef.kind == constants.QFT_UNKNOWN]
3376
3377
def _WarnUnknownFields(fdefs):
  """Prints a warning to stderr if a query included unknown fields.

  @type fdefs: list of L{objects.QueryFieldDefinition}
  @rtype: bool
  @return: whether any unknown field was found

  """
  unknown = _GetUnknownFields(fdefs)
  if not unknown:
    return False

  ToStderr("Warning: Queried for unknown fields %s",
           utils.CommaJoin(fdef.name for fdef in unknown))
  return True
3391
3392
def GenericList(resource, fields, names, unit, separator, header, cl=None,
                format_override=None, verbose=False, force_filter=False,
                namefield=None, qfilter=None, isnumeric=False):
  """Generic implementation for listing all items of a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type names: list of strings
  @param names: Names of items to query for
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
      or None for automatic choice (human-readable for non-separator usage,
      otherwise megabytes); this is a one-letter string
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @type force_filter: bool
  @param force_filter: Whether to always treat names as filter
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting
      functions, indexed by field name, contents like
      L{_DEFAULT_FORMAT_QUERY}
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not
  @type namefield: string
  @param namefield: Name of field to use for simple filters (see
      L{qlang.MakeFilter} for details)
  @type qfilter: list or None
  @param qfilter: Query filter (in addition to names)
  @param isnumeric: bool
  @param isnumeric: Whether the namefield's type is numeric, and therefore
      any simple filters built by namefield should use integer values to
      reflect that
  @rtype: int
  @return: L{constants.EXIT_SUCCESS} or L{constants.EXIT_UNKNOWN_FIELD}

  """
  # An empty name list means "no name restriction"
  name_filter = qlang.MakeFilter(names or None, force_filter,
                                 namefield=namefield, isnumeric=isnumeric)

  if qfilter is None:
    qfilter = name_filter
  elif name_filter is not None:
    # Combine the name-based filter with the caller-supplied one
    qfilter = [qlang.OP_AND, name_filter, qfilter]

  if cl is None:
    cl = GetClient()

  response = cl.Query(resource, fields, qfilter)

  found_unknown = _WarnUnknownFields(response.fields)

  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
                                     header=header,
                                     format_override=format_override,
                                     verbose=verbose)

  for line in data:
    ToStdout(line)

  # Unknown fields must have been both warned about and reflected in status
  assert ((found_unknown and status == QR_UNKNOWN) or
          (not found_unknown and status != QR_UNKNOWN))

  if status == QR_UNKNOWN:
    return constants.EXIT_UNKNOWN_FIELD

  # TODO: Should the list command fail if not all data could be collected?
  return constants.EXIT_SUCCESS
3463
3464
def _FieldDescValues(fdef):
  """Helper function for L{GenericListFields} to get query field description.

  @type fdef: L{objects.QueryFieldDefinition}
  @rtype: list
  @return: field name, human-readable kind (falling back to the raw kind),
      title and documentation

  """
  kind_name = _QFT_NAMES.get(fdef.kind, fdef.kind)
  return [fdef.name, kind_name, fdef.title, fdef.doc]
3478
3479
def GenericListFields(resource, fields, separator, header, cl=None):
  """Generic implementation for listing fields for a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @rtype: int
  @return: L{constants.EXIT_SUCCESS} or L{constants.EXIT_UNKNOWN_FIELD}

  """
  if cl is None:
    cl = GetClient()

  # An empty field list means "all fields"
  if not fields:
    fields = None

  response = cl.QueryFields(resource, fields)

  found_unknown = _WarnUnknownFields(response.fields)

  columns = [TableColumn(title, str, False)
             for title in ["Name", "Type", "Title", "Description"]]

  rows = [_FieldDescValues(fdef) for fdef in response.fields]

  for line in FormatTable(rows, columns, header, separator):
    ToStdout(line)

  if found_unknown:
    return constants.EXIT_UNKNOWN_FIELD

  return constants.EXIT_SUCCESS
3518
3519
class TableColumn(object):
  """Describes a column for L{FormatTable}.

  """
  def __init__(self, title, fn, align_right):
    """Initializes this class.

    @type title: string
    @param title: Column title
    @type fn: callable
    @param fn: Formatting function taking one value and returning a string
    @type align_right: bool
    @param align_right: Whether to align values on the right-hand side

    """
    self.align_right = align_right
    self.format = fn
    self.title = title
3538
3539
3540 def _GetColFormatString(width, align_right):
3541 """Returns the format string for a field.
3542
3543 """
3544 if align_right:
3545 sign = ""
3546 else:
3547 sign = "-"
3548
3549 return "%%%s%ss" % (sign, width)
3550
3551
def FormatTable(rows, columns, header, separator):
  """Formats data as a table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @type header: bool
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns
  @rtype: list of strings
  @return: the formatted lines

  """
  if header:
    data = [[col.title for col in columns]]
    colwidth = [len(col.title) for col in columns]
  else:
    data = []
    colwidth = [0] * len(columns)

  # Format row data
  for row in rows:
    assert len(row) == len(columns)

    formatted = [col.format(value) for value, col in zip(row, columns)]

    if separator is None:
      # Track the widest value seen in each column
      for idx, value in enumerate(formatted):
        if len(value) > colwidth[idx]:
          colwidth[idx] = len(value)

    data.append(formatted)

  if separator is not None:
    # Return early if a separator is used
    return [separator.join(line) for line in data]

  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line
    colwidth[-1] = 0

  # Build format string
  fmt = " ".join(_GetColFormatString(width, col.align_right)
                 for col, width in zip(columns, colwidth))

  return [fmt % tuple(line) for line in data]
3599
3600
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp, or "?" if the input is
      not a two-element tuple or list

  """
  if isinstance(ts, (tuple, list)) and len(ts) == 2:
    (sec, usecs) = ts
    return utils.FormatTime(sec, usecs=usecs)

  # Anything else is not a valid timeval
  return "?"
3616
3617
def ParseTimespec(value):
  """Parse a time specification.

  The following suffixed will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: day
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  @rtype: int
  @return: the number of seconds represented by the specification
  @raise errors.OpPrereqError: if the specification is empty or invalid

  """
  # Seconds per suffix unit
  suffixes = {
    "s": 1,
    "m": 60,
    "h": 3600,
    "d": 86400,
    "w": 604800,
    }

  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed",
                               errors.ECODE_INVAL)

  multiplier = suffixes.get(value[-1])
  if multiplier is None:
    # No known suffix: interpret the whole string as seconds
    try:
      return int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)

  value = value[:-1]
  if not value: # no data left after stripping the suffix
    raise errors.OpPrereqError("Invalid time specification (only"
                               " suffix passed)", errors.ECODE_INVAL)
  try:
    return int(value) * multiplier
  except (TypeError, ValueError):
    raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                               errors.ECODE_INVAL)
3661
3662
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False, nodegroup=None):
  """Returns the names of online nodes.

  This function will also log a warning on stderr with the names of
  the online nodes.

  @param nodes: if not empty, use only this subset of nodes (minus the
    offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
    offline nodes that are skipped; if this parameter is True the
    note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
    names, useful for doing network traffic over the replication interface
    (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
    (useful in coordination with secondary_ips where we cannot check our
    node name against the list)
  @type nodegroup: string
  @param nodegroup: If set, only return nodes in this node group

  """
  if cl is None:
    cl = GetClient()

  filters = []

  if nodes:
    filters.append(qlang.MakeSimpleFilter("name", nodes))

  if nodegroup is not None:
    # Accept either the group name or its UUID
    filters.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
                    [qlang.OP_EQUAL, "group.uuid", nodegroup]])

  if filter_master:
    filters.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])

  if not filters:
    final_filter = None
  elif len(filters) == 1:
    final_filter = filters[0]
  else:
    final_filter = [qlang.OP_AND] + filters

  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)

  # Each result row is [(status, name), (status, offline), (status, sip)]
  def _IsOffline(row):
    return row[1][1]

  def _GetName(row):
    return row[0][1]

  def _GetSip(row):
    return row[2][1]

  (offline, online) = compat.partition(result.data, _IsOffline)

  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" %
             utils.CommaJoin(map(_GetName, offline)))

  if secondary_ips:
    return map(_GetSip, online)

  return map(_GetName, online)
3739
3740
def GetNodesSshPorts(nodes, cl):
  """Retrieves SSH ports of given nodes.

  @param nodes: the names of nodes
  @type nodes: a list of strings
  @param cl: a client to use for the query
  @type cl: L{ganeti.luxi.Client}
  @return: the SSH port of each node, in the same order as C{nodes}
  @rtype: a list
  """
  rows = cl.QueryNodes(names=nodes,
                       fields=["ndp/ssh_port"],
                       use_locking=False)
  # Each row holds a single field; extract it
  return [row[0] for row in rows]
3755
3756
def _ToStream(stream, txt, *args):
  """Write a message to a stream, bypassing the logging system

  @type stream: file object
  @param stream: the file to which we should write
  @type txt: str
  @param txt: the message
  @param args: optional "%"-interpolation arguments for C{txt}

  """
  try:
    if args:
      # Only interpolate when arguments were given, so a literal "%" in
      # txt does not break the formatting
      args = tuple(args)
      stream.write(txt % args)
    else:
      stream.write(txt)
    stream.write("\n")
    stream.flush()
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
    else:
      raise
3780