657308e1e485199758a8af00252c7f787f1cd46e
[ganeti-github.git] / lib / cli.py
1 #
2 #
3
4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc.
5 # All rights reserved.
6 #
7 # Redistribution and use in source and binary forms, with or without
8 # modification, are permitted provided that the following conditions are
9 # met:
10 #
11 # 1. Redistributions of source code must retain the above copyright notice,
12 # this list of conditions and the following disclaimer.
13 #
14 # 2. Redistributions in binary form must reproduce the above copyright
15 # notice, this list of conditions and the following disclaimer in the
16 # documentation and/or other materials provided with the distribution.
17 #
18 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
19 # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
20 # TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
21 # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
22 # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
23 # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
24 # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
25 # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
26 # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
27 # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28 # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
30
31 """Module dealing with command line parsing"""
32
33
34 import sys
35 import textwrap
36 import os.path
37 import time
38 import logging
39 import errno
40 import itertools
41 import shlex
42 from cStringIO import StringIO
43
44 from ganeti import utils
45 from ganeti import errors
46 from ganeti import constants
47 from ganeti import opcodes
48 import ganeti.rpc.errors as rpcerr
49 import ganeti.rpc.node as rpc
50 from ganeti import ssh
51 from ganeti import compat
52 from ganeti import netutils
53 from ganeti import qlang
54 from ganeti import objects
55 from ganeti import pathutils
56 from ganeti import serializer
57
58 from ganeti.runtime import (GetClient)
59
60 from optparse import (OptionParser, TitledHelpFormatter,
61 Option, OptionValueError)
62
63
#: Public interface of this module; names listed here are re-exported to the
#: individual gnt-* command-line scripts.
__all__ = [
  # Command line options
  "ABSOLUTE_OPT",
  "ADD_UIDS_OPT",
  "ADD_RESERVED_IPS_OPT",
  "ALLOCATABLE_OPT",
  "ALLOC_POLICY_OPT",
  "ALL_OPT",
  "ALLOW_FAILOVER_OPT",
  "AUTO_PROMOTE_OPT",
  "AUTO_REPLACE_OPT",
  "BACKEND_OPT",
  "BLK_OS_OPT",
  "CAPAB_MASTER_OPT",
  "CAPAB_VM_OPT",
  "CLEANUP_OPT",
  "CLUSTER_DOMAIN_SECRET_OPT",
  "CONFIRM_OPT",
  "CP_SIZE_OPT",
  "COMPRESSION_TOOLS_OPT",
  "DEBUG_OPT",
  "DEBUG_SIMERR_OPT",
  "DISKIDX_OPT",
  "DISK_OPT",
  "DISK_PARAMS_OPT",
  "DISK_TEMPLATE_OPT",
  "DRAINED_OPT",
  "DRY_RUN_OPT",
  "DRBD_HELPER_OPT",
  "DST_NODE_OPT",
  "EARLY_RELEASE_OPT",
  "ENABLED_HV_OPT",
  "ENABLED_DISK_TEMPLATES_OPT",
  "ENABLED_USER_SHUTDOWN_OPT",
  "ERROR_CODES_OPT",
  "FAILURE_ONLY_OPT",
  "FIELDS_OPT",
  "FILESTORE_DIR_OPT",
  "FILESTORE_DRIVER_OPT",
  "FORCE_FAILOVER_OPT",
  "FORCE_FILTER_OPT",
  "FORCE_OPT",
  "FORCE_VARIANT_OPT",
  "GATEWAY_OPT",
  "GATEWAY6_OPT",
  "GLOBAL_FILEDIR_OPT",
  "HID_OS_OPT",
  "GLOBAL_GLUSTER_FILEDIR_OPT",
  "GLOBAL_SHARED_FILEDIR_OPT",
  "HOTPLUG_OPT",
  "HOTPLUG_IF_POSSIBLE_OPT",
  "HVLIST_OPT",
  "HVOPTS_OPT",
  "HYPERVISOR_OPT",
  "IALLOCATOR_OPT",
  "DEFAULT_IALLOCATOR_OPT",
  "DEFAULT_IALLOCATOR_PARAMS_OPT",
  "IDENTIFY_DEFAULTS_OPT",
  "IGNORE_CONSIST_OPT",
  "IGNORE_ERRORS_OPT",
  "IGNORE_FAILURES_OPT",
  "IGNORE_OFFLINE_OPT",
  "IGNORE_REMOVE_FAILURES_OPT",
  "IGNORE_SECONDARIES_OPT",
  "IGNORE_SIZE_OPT",
  "INCLUDEDEFAULTS_OPT",
  "INTERVAL_OPT",
  "INSTALL_IMAGE_OPT",
  "INSTANCE_COMMUNICATION_OPT",
  "INSTANCE_COMMUNICATION_NETWORK_OPT",
  "MAC_PREFIX_OPT",
  "MAINTAIN_NODE_HEALTH_OPT",
  "MASTER_NETDEV_OPT",
  "MASTER_NETMASK_OPT",
  "MAX_TRACK_OPT",
  "MC_OPT",
  "MIGRATION_MODE_OPT",
  "MODIFY_ETCHOSTS_OPT",
  "NET_OPT",
  "NETWORK_OPT",
  "NETWORK6_OPT",
  "NEW_CLUSTER_CERT_OPT",
  "NEW_NODE_CERT_OPT",
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
  "NEW_CONFD_HMAC_KEY_OPT",
  "NEW_RAPI_CERT_OPT",
  "NEW_PRIMARY_OPT",
  "NEW_SECONDARY_OPT",
  "NEW_SPICE_CERT_OPT",
  "NIC_PARAMS_OPT",
  "NOCONFLICTSCHECK_OPT",
  "NODE_FORCE_JOIN_OPT",
  "NODE_LIST_OPT",
  "NODE_PLACEMENT_OPT",
  "NODEGROUP_OPT",
  "NODE_PARAMS_OPT",
  "NODE_POWERED_OPT",
  "NOHDR_OPT",
  "NOIPCHECK_OPT",
  "NO_INSTALL_OPT",
  "NONAMECHECK_OPT",
  "NOMODIFY_ETCHOSTS_OPT",
  "NOMODIFY_SSH_SETUP_OPT",
  "NONICS_OPT",
  "NONLIVE_OPT",
  "NONPLUS1_OPT",
  "NORUNTIME_CHGS_OPT",
  "NOSHUTDOWN_OPT",
  "NOSTART_OPT",
  "NOSSH_KEYCHECK_OPT",
  "NOVOTING_OPT",
  "NO_REMEMBER_OPT",
  "NWSYNC_OPT",
  "OFFLINE_INST_OPT",
  "ONLINE_INST_OPT",
  "ON_PRIMARY_OPT",
  "ON_SECONDARY_OPT",
  "OFFLINE_OPT",
  "OS_OPT",
  "OSPARAMS_OPT",
  "OSPARAMS_PRIVATE_OPT",
  "OSPARAMS_SECRET_OPT",
  "OS_SIZE_OPT",
  "OOB_TIMEOUT_OPT",
  "POWER_DELAY_OPT",
  "PREALLOC_WIPE_DISKS_OPT",
  "PRIMARY_IP_VERSION_OPT",
  "PRIMARY_ONLY_OPT",
  "PRINT_JOBID_OPT",
  "PRIORITY_OPT",
  "RAPI_CERT_OPT",
  "READD_OPT",
  "REASON_OPT",
  "REBOOT_TYPE_OPT",
  "REMOVE_INSTANCE_OPT",
  "REMOVE_RESERVED_IPS_OPT",
  "REMOVE_UIDS_OPT",
  "RESERVED_LVS_OPT",
  "RQL_OPT",
  "RUNTIME_MEM_OPT",
  "ROMAN_OPT",
  "SECONDARY_IP_OPT",
  "SECONDARY_ONLY_OPT",
  "SELECT_OS_OPT",
  "SEP_OPT",
  "SHOWCMD_OPT",
  "SHOW_MACHINE_OPT",
  "COMPRESS_OPT",
  "TRANSPORT_COMPRESSION_OPT",
  "SHUTDOWN_TIMEOUT_OPT",
  "SINGLE_NODE_OPT",
  "SPECS_CPU_COUNT_OPT",
  "SPECS_DISK_COUNT_OPT",
  "SPECS_DISK_SIZE_OPT",
  "SPECS_MEM_SIZE_OPT",
  "SPECS_NIC_COUNT_OPT",
  "SPLIT_ISPECS_OPTS",
  "IPOLICY_STD_SPECS_OPT",
  "IPOLICY_DISK_TEMPLATES",
  "IPOLICY_VCPU_RATIO",
  "SEQUENTIAL_OPT",
  "SPICE_CACERT_OPT",
  "SPICE_CERT_OPT",
  "SRC_DIR_OPT",
  "SRC_NODE_OPT",
  "SUBMIT_OPT",
  "SUBMIT_OPTS",
  "STARTUP_PAUSED_OPT",
  "STATIC_OPT",
  "SYNC_OPT",
  "TAG_ADD_OPT",
  "TAG_SRC_OPT",
  "TIMEOUT_OPT",
  "TO_GROUP_OPT",
  "UIDPOOL_OPT",
  "USEUNITS_OPT",
  "USE_EXTERNAL_MIP_SCRIPT",
  "USE_REPL_NET_OPT",
  "VERBOSE_OPT",
  "VG_NAME_OPT",
  "WFSYNC_OPT",
  "YES_DOIT_OPT",
  "ZEROING_IMAGE_OPT",
  "ZERO_FREE_SPACE_OPT",
  "HELPER_STARTUP_TIMEOUT_OPT",
  "HELPER_SHUTDOWN_TIMEOUT_OPT",
  "ZEROING_TIMEOUT_FIXED_OPT",
  "ZEROING_TIMEOUT_PER_MIB_OPT",
  "DISK_STATE_OPT",
  "HV_STATE_OPT",
  "IGNORE_IPOLICY_OPT",
  "INSTANCE_POLICY_OPTS",
  # Generic functions for CLI programs
  "ConfirmOperation",
  "CreateIPolicyFromOpts",
  "GenericMain",
  "GenericInstanceCreate",
  "GenericList",
  "GenericListFields",
  "GetClient",
  "GetOnlineNodes",
  "GetNodesSshPorts",
  "JobExecutor",
  "JobSubmittedException",
  "ParseTimespec",
  "RunWhileClusterStopped",
  "RunWhileDaemonsStopped",
  "SubmitOpCode",
  "SubmitOpCodeToDrainedQueue",
  "SubmitOrSend",
  "UsesRPC",
  # Formatting functions
  "ToStderr", "ToStdout",
  "FormatError",
  "FormatQueryResult",
  "FormatParamsDictInfo",
  "FormatPolicyInfo",
  "PrintIPolicyCommand",
  "PrintGenericInfo",
  "GenerateTable",
  "AskUser",
  "FormatTimestamp",
  "FormatLogMessage",
  # Tags functions
  "ListTags",
  "AddTags",
  "RemoveTags",
  # command line options support infrastructure
  "ARGS_MANY_INSTANCES",
  "ARGS_MANY_NODES",
  "ARGS_MANY_GROUPS",
  "ARGS_MANY_NETWORKS",
  "ARGS_NONE",
  "ARGS_ONE_INSTANCE",
  "ARGS_ONE_NODE",
  "ARGS_ONE_GROUP",
  "ARGS_ONE_OS",
  "ARGS_ONE_NETWORK",
  "ArgChoice",
  "ArgCommand",
  "ArgFile",
  "ArgGroup",
  "ArgHost",
  "ArgInstance",
  "ArgJobId",
  "ArgNetwork",
  "ArgNode",
  "ArgOs",
  "ArgExtStorage",
  "ArgSuggest",
  "ArgUnknown",
  "OPT_COMPL_INST_ADD_NODES",
  "OPT_COMPL_MANY_NODES",
  "OPT_COMPL_ONE_IALLOCATOR",
  "OPT_COMPL_ONE_INSTANCE",
  "OPT_COMPL_ONE_NODE",
  "OPT_COMPL_ONE_NODEGROUP",
  "OPT_COMPL_ONE_NETWORK",
  "OPT_COMPL_ONE_OS",
  "OPT_COMPL_ONE_EXTSTORAGE",
  "cli_option",
  "FixHvParams",
  "SplitNodeOption",
  "CalculateOSNames",
  "ParseFields",
  "COMMON_CREATE_OPTS",
  ]
331
#: Prefix which, in key[=value] option strings, marks a key to be set to False
#: with the prefix stripped (see L{_SplitKeyVal})
NO_PREFIX = "no_"

#: Prefix which, in key[=value] option strings, marks a key to be set to None
#: with the prefix stripped (see L{_SplitKeyVal})
UN_PREFIX = "-"

#: Priorities (sorted)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)

# Query result status for clients
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)

#: Maximum batch size for ChooseJob
_CHOOSE_BATCH = 25


# constants used to create InstancePolicy dictionary
TISPECS_GROUP_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  }

TISPECS_CLUSTER_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  constants.ISPECS_STD: constants.VTYPE_INT,
  }

#: User-friendly names for query2 field types
_QFT_NAMES = {
  constants.QFT_UNKNOWN: "Unknown",
  constants.QFT_TEXT: "Text",
  constants.QFT_BOOL: "Boolean",
  constants.QFT_NUMBER: "Number",
  constants.QFT_NUMBER_FLOAT: "Floating-point number",
  constants.QFT_UNIT: "Storage size",
  constants.QFT_TIMESTAMP: "Timestamp",
  constants.QFT_OTHER: "Custom",
  }
379
380
class _Argument(object):
  """Base description of a positional argument.

  Stores only how many times the argument may occur: at least C{min}
  times and at most C{max} times (C{None} meaning no upper bound).

  """
  def __init__(self, min=0, max=None): # pylint: disable=W0622
    self.min = min
    self.max = max

  def __repr__(self):
    return "<%s min=%s max=%s>" % (self.__class__.__name__, self.min, self.max)


class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    return "<%s min=%s max=%s choices=%r>" % (self.__class__.__name__,
                                              self.min, self.max, self.choices)
405
406
class ArgChoice(ArgSuggest):
  """Choice argument.

  Like L{ArgSuggest}, but the value must be one of the choices passed to
  the constructor.

  """
414
415
class ArgUnknown(_Argument):
  """Argument whose kind is not known in advance (e.g. determined at runtime).

  """
420
421
class ArgInstance(_Argument):
  """Argument denoting instance name(s).

  """
426
427
class ArgNode(_Argument):
  """Argument denoting a node name.

  """
432
433
class ArgNetwork(_Argument):
  """Argument denoting a network name.

  """
438
439
class ArgGroup(_Argument):
  """Argument denoting a node group name.

  """
444
445
class ArgJobId(_Argument):
  """Argument denoting a job ID.

  """
450
451
class ArgFile(_Argument):
  """Argument denoting a file path.

  """
456
457
class ArgCommand(_Argument):
  """Argument denoting a command.

  """
462
463
class ArgHost(_Argument):
  """Argument denoting a host name.

  """
468
469
class ArgOs(_Argument):
  """Argument denoting an OS name.

  """
474
475
class ArgExtStorage(_Argument):
  """Argument denoting an ExtStorage provider name.

  """
480
481
# Shortcut argument specifications shared by the per-script command tables
ARGS_NONE = []
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NETWORKS = [ArgNetwork()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NETWORK = [ArgNetwork(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
# TODO
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
493
494
def _ExtractTagsObject(opts, args):
  """Determine the tag target (kind, name) from options and arguments.

  Note that this function will modify its args parameter: for
  non-cluster tag types the first positional argument is consumed as the
  name of the object to operate on.

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    # The cluster is a singleton, so no name is needed
    return (kind, "")
  if kind in (constants.TAG_NODEGROUP,
              constants.TAG_NODE,
              constants.TAG_NETWORK,
              constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command",
                                 errors.ECODE_INVAL)
    return (kind, args.pop(0))
  raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
518
519
def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  @param opts: parsed command-line options; C{opts.tags_source} is the
      file name to read (or None to do nothing)
  @param args: list of tags, extended in place with one entry per
      stripped input line

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    # Only close handles we opened ourselves; closing sys.stdin here
    # would break any later interactive prompt (e.g. AskUser)
    if new_fh is not sys.stdin:
      new_fh.close()
  args.extend(new_data)
547
548
def ListTags(opts, args):
  """List the tags on a given object.

  Generic implementation covering all tag object kinds (cluster, node,
  instance, ...); C{opts.tag_type} selects which kind is operated on,
  and the tags are printed sorted, one per line.

  """
  (kind, name) = _ExtractTagsObject(opts, args)
  cl = GetClient()
  for tag in sorted(cl.QueryTags(kind, name)):
    ToStdout(tag)
565
566
def AddTags(opts, args):
  """Add tags on a given object.

  Generic implementation covering all tag object kinds (cluster, node,
  instance, ...); C{opts.tag_type} selects which kind is operated on,
  and the tag list may be extended from a file via C{opts.tags_source}.

  """
  (kind, name) = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added", errors.ECODE_INVAL)
  SubmitOrSend(opcodes.OpTagsSet(kind=kind, name=name, tags=args), opts)
582
583
def RemoveTags(opts, args):
  """Remove tags from a given object.

  Generic implementation covering all tag object kinds (cluster, node,
  instance, ...); C{opts.tag_type} selects which kind is operated on,
  and the tag list may be extended from a file via C{opts.tags_source}.

  """
  (kind, name) = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed", errors.ECODE_INVAL)
  SubmitOrSend(opcodes.OpTagsDel(kind=kind, name=name, tags=args), opts)
599
600
def check_unit(option, opt, value): # pylint: disable=W0613
  """OptParsers custom converter for units.

  Delegates to L{utils.ParseUnit}; parse failures are re-raised as
  L{OptionValueError} so optparse reports them as user errors.

  """
  try:
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    raise OptionValueError("option %s: %s" % (opt, err))
609
610
def _SplitKeyVal(opt, data, parse_prefixes):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, keys with the prefix
  "-" will have value=None and the prefix stripped, and the others will
  have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @type parse_prefixes: bool
  @param parse_prefixes: whether to handle prefixes specially
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  parsed = {}
  if not data:
    return parsed
  for elem in utils.UnescapeAndSplit(data, sep=","):
    if "=" in elem:
      (key, val) = elem.split("=", 1)
    elif parse_prefixes and elem.startswith(NO_PREFIX):
      (key, val) = (elem[len(NO_PREFIX):], False)
    elif parse_prefixes and elem.startswith(UN_PREFIX):
      (key, val) = (elem[len(UN_PREFIX):], None)
    elif parse_prefixes:
      (key, val) = (elem, True)
    else:
      raise errors.ParameterError("Missing value for key '%s' in option %s" %
                                  (elem, opt))
    if key in parsed:
      raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                  (key, opt))
    parsed[key] = val
  return parsed
652
653
def _SplitIdentKeyVal(opt, value, parse_prefixes):
  """Helper function to parse "ident:key=val,key=val" options.

  @type opt: string
  @param opt: option name, used in error messages
  @type value: string
  @param value: expected to be in the format "ident:key=val,key=val,..."
  @type parse_prefixes: bool
  @param parse_prefixes: whether to handle prefixes specially (see
      L{_SplitKeyVal})
  @rtype: tuple
  @return: (ident, {key=val, key=val})
  @raises errors.ParameterError: in case of duplicates or other parsing errors

  """
  # Everything up to the first ":" is the identifier; without a ":" the
  # whole value is the identifier and there are no key=val pairs
  if ":" not in value:
    ident, rest = value, ""
  else:
    ident, rest = value.split(":", 1)

  # "no_ident" means the whole parameter group is being removed (False)
  if parse_prefixes and ident.startswith(NO_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  # "-ident" means the group is reset to None, but only when the character
  # after "-" is not a digit — presumably so that negative numeric
  # identifiers (e.g. "-1") are not treated as removals; TODO confirm
  elif (parse_prefixes and ident.startswith(UN_PREFIX) and
        (len(ident) <= len(UN_PREFIX) or not ident[len(UN_PREFIX)].isdigit())):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest, parse_prefixes)
    retval = (ident, kv_dict)
  return retval
689
690
def check_ident_key_val(option, opt, value): # pylint: disable=W0613
  """Custom parser for ident:key=val,key=val options.

  The parsed value is stored as a tuple (ident, {key: val}), which makes
  repeated use via action=append possible.

  """
  return _SplitIdentKeyVal(opt, value, parse_prefixes=True)
699
700
def check_key_val(option, opt, value): # pylint: disable=W0613
  """Custom parser class for key=val,key=val options.

  The parsed value is stored as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value, parse_prefixes=True)
708
709
def check_key_private_val(option, opt, value): # pylint: disable=W0613
  """Custom parser class for private and secret key=val,key=val options.

  The parsed value is stored as a dict {key: val}, wrapped in a
  L{serializer.PrivateDict} so the values are not leaked into logs.

  """
  parsed = _SplitKeyVal(opt, value, parse_prefixes=True)
  return serializer.PrivateDict(parsed)
717
718
def _SplitListKeyVal(opt, value):
  """Parse a "/"-separated list of "ident:key=val,..." sections into a dict.

  @raises errors.ParameterError: on empty sections or duplicate identifiers

  """
  parsed = {}
  for section in value.split("/"):
    if not section:
      raise errors.ParameterError("Empty section in option '%s'" % opt)
    (ident, valdict) = _SplitIdentKeyVal(opt, section, False)
    if ident in parsed:
      msg = ("Duplicated parameter '%s' in parsing %s: %s" %
             (ident, opt, section))
      raise errors.ParameterError(msg)
    parsed[ident] = valdict
  return parsed
731
732
def check_multilist_ident_key_val(_, opt, value):
  """Custom parser for "ident:key=val,key=val/ident:key=val//ident:.." options.

  @rtype: list of dictionary
  @return: [{ident: {key: val, key: val}, ident: {key: val}}, {ident:..}]

  """
  return [_SplitListKeyVal(opt, line) for line in value.split("//")]
744
745
def check_bool(option, opt, value): # pylint: disable=W0613
  """Custom parser for yes/no options.

  Case-insensitively maps "yes"/VALUE_TRUE to True and "no"/VALUE_FALSE
  to False; any other input is rejected.

  """
  value = value.lower()
  if value in (constants.VALUE_FALSE, "no"):
    return False
  if value in (constants.VALUE_TRUE, "yes"):
    return True
  raise errors.ParameterError("Invalid boolean value '%s'" % value)
759
760
def check_list(option, opt, value): # pylint: disable=W0613
  """Custom parser for comma-separated lists.

  """
  # "".split(",") would yield [""] rather than an empty list, hence the
  # explicit emptiness check
  if value:
    return utils.UnescapeAndSplit(value)
  return []
771
772
def check_maybefloat(option, opt, value): # pylint: disable=W0613
  """Custom parser for float numbers which might be also defaults.

  Returns the (lower-cased) VALUE_DEFAULT marker unchanged, otherwise
  converts the input to a float.

  """
  lowered = value.lower()
  if lowered == constants.VALUE_DEFAULT:
    return lowered
  return float(lowered)
783
784
# completion_suggestion is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_EXTSTORAGE,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_ONE_NETWORK,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 109)

#: All dynamic-completion markers, for membership tests
OPT_COMPL_ALL = compat.UniqueFrozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_EXTSTORAGE,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_ONE_NETWORK,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])
808
809
class CliOption(Option):
  """Custom option class for optparse.

  Extends the standard optparse option with the Ganeti-specific value
  types (unit sizes, key=value dictionaries, ...) and with the extra
  "completion_suggest" attribute used for dynamic completion.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "multilistidentkeyval",
    "identkeyval",
    "keyval",
    "keyprivateval",
    "unit",
    "bool",
    "list",
    "maybefloat",
    )
  TYPE_CHECKER = dict(Option.TYPE_CHECKER)
  TYPE_CHECKER.update({
    "multilistidentkeyval": check_multilist_ident_key_val,
    "identkeyval": check_ident_key_val,
    "keyval": check_key_val,
    "keyprivateval": check_key_private_val,
    "unit": check_unit,
    "bool": check_bool,
    "list": check_list,
    "maybefloat": check_maybefloat,
    })
836
837
# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption


_YORNO = "yes|no"

# Generic output/behaviour options shared by most gnt-* scripts

DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=("h", "m", "g", "t"),
                          help="Specify units for output (one of h/m/g/t)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")

IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
                                action="store_true", default=False,
                                help=("Ignore offline nodes and do as much"
                                      " as possible"))

TAG_ADD_OPT = cli_option("--tags", dest="tags",
                         default=None, help="Comma-separated list of instance"
                         " tags")

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

# Job submission/synchronisation options

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

PRINT_JOBID_OPT = cli_option("--print-jobid", dest="print_jobid",
                             default=False, action="store_true",
                             help=("Additionally print the job as first line"
                                   " on stdout (for scripting)."))

SEQUENTIAL_OPT = cli_option("--sequential", dest="sequential",
                            default=False, action="store_true",
                            help=("Execute all resulting jobs sequentially"))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))

DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true",
                         help=("Do not execute the operation, just run the"
                               " check steps and verify if it could be"
                               " executed"))

VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
                         action="store_true",
                         help="Increase the verbosity of the operation")

DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

# NOTE: NWSYNC_OPT and WFSYNC_OPT deliberately share dest="wait_for_sync"
# (they are used by different commands with opposite defaults)
NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

WFSYNC_OPT = cli_option("--wait-for-sync", dest="wait_for_sync",
                        default=False, action="store_true",
                        help="Wait for disks to sync")

ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
                             action="store_true", default=False,
                             help="Enable offline instance")

OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
                              action="store_true", default=False,
                              help="Disable down instance")
930 help="Disable down instance")
931
# Instance creation / OS-related options

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help=("Custom disk setup (%s)" %
                                     utils.CommaJoin(constants.DISK_TEMPLATES)),
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))

NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
                        help="Do not create any network cards for"
                        " the instance")

FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default=None, metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

# NOTE: uses the same short option "-I" as IALLOCATOR_OPT; the two are
# used by different commands
DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
                                    metavar="<NAME>",
                                    help="Set the default instance"
                                    " allocator plugin",
                                    default=None, type="string",
                                    completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

DEFAULT_IALLOCATOR_PARAMS_OPT = cli_option("--default-iallocator-params",
                                           dest="default_iallocator_params",
                                           help="iallocator template"
                                           " parameters, in the format"
                                           " template:option=value,"
                                           " option=value,...",
                                           type="keyval",
                                           default={})

OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
                    metavar="<os>",
                    completion_suggest=OPT_COMPL_ONE_OS)

OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                          type="keyval", default={},
                          help="OS parameters")

OSPARAMS_PRIVATE_OPT = cli_option("--os-parameters-private",
                                  dest="osparams_private",
                                  type="keyprivateval",
                                  default=serializer.PrivateDict(),
                                  help="Private OS parameters"
                                  " (won't be logged)")

OSPARAMS_SECRET_OPT = cli_option("--os-parameters-secret",
                                 dest="osparams_secret",
                                 type="keyprivateval",
                                 default=serializer.PrivateDict(),
                                 help="Secret OS parameters (won't be logged or"
                                 " saved; you must supply these for every"
                                 " operation.)")

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")

NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
                            action="store_true", default=False,
                            help="Do not install the OS (will"
                            " enable no-start)")

NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes",
                                dest="allow_runtime_chgs",
                                default=True, action="store_false",
                                help="Don't allow runtime changes")
1010
# Parameter-dictionary and instance-policy options

BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
                        default={}, dest="hvparams",
                        help="Hypervisor parameters")

# NOTE(review): "append"-type options share their default list object between
# parses — confirm callers never mutate these defaults
DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
                             help="Disk template parameters, in the format"
                             " template:option=value,option=value,...",
                             type="identkeyval", action="append", default=[])

SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
                                type="keyval", default={},
                                help="Memory size specs: list of key=value,"
                                " where key is one of min, max, std"
                                " (in MB or using a unit)")

SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
                                 type="keyval", default={},
                                 help="CPU count specs: list of key=value,"
                                 " where key is one of min, max, std")

SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
                                  dest="ispecs_disk_count",
                                  type="keyval", default={},
                                  help="Disk count specs: list of key=value,"
                                  " where key is one of min, max, std")

SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
                                 type="keyval", default={},
                                 help="Disk size specs: list of key=value,"
                                 " where key is one of min, max, std"
                                 " (in MB or using a unit)")

SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
                                 type="keyval", default={},
                                 help="NIC count specs: list of key=value,"
                                 " where key is one of min, max, std")

IPOLICY_BOUNDS_SPECS_STR = "--ipolicy-bounds-specs"
IPOLICY_BOUNDS_SPECS_OPT = cli_option(IPOLICY_BOUNDS_SPECS_STR,
                                      dest="ipolicy_bounds_specs",
                                      type="multilistidentkeyval", default=None,
                                      help="Complete instance specs limits")

IPOLICY_STD_SPECS_STR = "--ipolicy-std-specs"
IPOLICY_STD_SPECS_OPT = cli_option(IPOLICY_STD_SPECS_STR,
                                   dest="ipolicy_std_specs",
                                   type="keyval", default=None,
                                   help="Complete standard instance specs")

IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
                                    dest="ipolicy_disk_templates",
                                    type="list", default=None,
                                    help="Comma-separated list of"
                                    " enabled disk templates")

IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
                                dest="ipolicy_vcpu_ratio",
                                type="maybefloat", default=None,
                                help="The maximum allowed vcpu-to-cpu ratio")

IPOLICY_SPINDLE_RATIO = cli_option("--ipolicy-spindle-ratio",
                                   dest="ipolicy_spindle_ratio",
                                   type="maybefloat", default=None,
                                   help=("The maximum allowed instances to"
                                         " spindle ratio"))

# NOTE: HYPERVISOR_OPT and HVLIST_OPT share the "-H" switch with HVOPTS_OPT
# above; they are used by different commands with different dest/types
HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")

NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
                           action="store_false",
                           help="Don't check that the instance's IP"
                           " is alive")

NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
                             default=True, action="store_false",
                             help="Don't check that the instance's name"
                             " is resolvable")

NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disks"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")

# Migration/failover behaviour tweaks

IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
                                dest="ignore_consistency",
                                action="store_true", default=False,
                                help="Ignore the consistency of the disks on"
                                " the secondary")

ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
                                dest="allow_failover",
                                action="store_true", default=False,
                                help="If migration is not possible fallback to"
                                " failover")

FORCE_FAILOVER_OPT = cli_option("--force-failover",
                                dest="force_failover",
                                action="store_true", default=False,
                                help="Do not use migration, always use"
                                " failover")
1135
1136 NONLIVE_OPT = cli_option("--non-live", dest="live",
1137 default=True, action="store_false",
1138 help="Do a non-live migration (this usually means"
1139 " freeze the instance, save the state, transfer and"
1140 " only then resume running on the secondary node)")
1141
# Overrides the default migration mode for one operation; valid choices
# come from constants.HT_MIGRATION_MODES.
# FIX: the help string previously lacked the closing parenthesis.
MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                default=None,
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live)")
1147
# Node selection options: several variants share "-n"/"--node" but store
# into different dests (single node, node list, primary[:secondary] pair).
NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
                                help="Target node and optional secondary node",
                                metavar="<pnode>[:<snode>]",
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)

NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
                           action="append", metavar="<node>",
                           help="Use only this node (can be used multiple"
                           " times, if not given defaults to all nodes)",
                           completion_suggest=OPT_COMPL_ONE_NODE)

NODEGROUP_OPT_NAME = "--node-group"
NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
                           dest="nodegroup",
                           help="Node group (name or uuid)",
                           metavar="<nodegroup>",
                           default=None, type="string",
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)

SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
                             metavar="<node>",
                             completion_suggest=OPT_COMPL_ONE_NODE)

NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                         action="store_false",
                         help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                         action="store_true", default=False,
                         help="Show command instead of executing it")
1178
# FIX: the help string had a double space ("and  disrupt", caused by a
# trailing space before a leading-space continuation) and was missing
# the closing parenthesis after "migration/failover".
CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration/failover,"
                         " try to recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and"
                         " disrupt briefly the replication (like during the"
                         " migration/failover)")
1187
# Query / instance-removal / disk-replacement options.
STATIC_OPT = cli_option("-s", "--static", dest="static",
                        action="store_true", default=False,
                        help="Only show configuration data, not runtime data")

ALL_OPT = cli_option("--all", dest="show_all",
                     default=False, action="store_true",
                     help="Show info on all instances on the cluster."
                     " This can take a long time to run, use wisely")

SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
                           action="store_true", default=False,
                           help="Interactive OS reinstall, lists available"
                           " OS templates for selection")

IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster"
                                 " configuration even if there are failures"
                                 " during the removal process")

IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
                                        dest="ignore_remove_failures",
                                        action="store_true", default=False,
                                        help="Remove the instance from the"
                                        " cluster configuration even if there"
                                        " are failures during the removal"
                                        " process")

REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster")

DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
                          help="Specifies the new node for the instance",
                          metavar="NODE", default=None,
                          completion_suggest=OPT_COMPL_ONE_NODE)

NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               help="Specifies the new secondary node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

NEW_PRIMARY_OPT = cli_option("--new-primary", dest="new_primary_node",
                             help="Specifies the new primary node",
                             metavar="<node>", default=None,
                             completion_suggest=OPT_COMPL_ONE_NODE)

ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                            " node (applies only to internally mirrored"
                            " disk templates, e.g. %s)" %
                            utils.CommaJoin(constants.DTS_INT_MIRROR))

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                              " node (applies only to internally mirrored"
                              " disk templates, e.g. %s)" %
                              utils.CommaJoin(constants.DTS_INT_MIRROR))

AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
                              default=False, action="store_true",
                              help="Lock all nodes and auto-promote as needed"
                              " to MC status")

AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
                              default=False, action="store_true",
                              help="Automatically replace faulty disks"
                              " (applies only to internally mirrored"
                              " disk templates, e.g. %s)" %
                              utils.CommaJoin(constants.DTS_INT_MIRROR))

IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")

SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
                          metavar="<node>",
                          completion_suggest=OPT_COMPL_ONE_NODE)

SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
                         metavar="<dir>")

# Node management options; the flag options use type="bool" with
# default None so the three states (set/unset/untouched) can be told apart.
SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary ip for the node",
                              metavar="ADDRESS", default=None)

READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Readd old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")

NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
                                 default=False, action="store_true",
                                 help="Force the joining of a node")

MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")

OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the offline flag on the node"
                               " (cluster does not communicate with offline"
                               " nodes)"))

DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the drained flag on the node"
                               " (excluded from allocation operations)"))

CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
                              type="bool", default=None, metavar=_YORNO,
                              help="Set the master_capable flag on the node")

CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
                          type="bool", default=None, metavar=_YORNO,
                          help="Set the vm_capable flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             type="bool", default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")

# Cluster-wide configuration options.
ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)

ENABLED_DISK_TEMPLATES_OPT = cli_option("--enabled-disk-templates",
                                        dest="enabled_disk_templates",
                                        help="Comma-separated list of "
                                        "disk templates",
                                        type="string", default=None)

ENABLED_USER_SHUTDOWN_OPT = cli_option("--user-shutdown",
                                       default=None,
                                       dest="enabled_user_shutdown",
                                       help="Whether user shutdown is enabled",
                                       type="bool")

NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
                            type="keyval", default={},
                            help="NIC parameters")

CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
                         dest="candidate_pool_size", type="int",
                         help="Set the candidate pool size")

RQL_OPT = cli_option("--max-running-jobs", dest="max_running_jobs",
                     type="int", help="Set the maximal number of jobs to "
                     "run simultaneously")

MAX_TRACK_OPT = cli_option("--max-tracked-jobs", dest="max_tracked_jobs",
                           type="int", help="Set the maximal number of jobs to "
                           "be tracked simultaneously for "
                           "scheduling")

COMPRESSION_TOOLS_OPT = \
  cli_option("--compression-tools",
             dest="compression_tools", type="string", default=None,
             help="Comma-separated list of compression tools which are"
             " allowed to be used by Ganeti in various operations")

VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
                         help=("Enables LVM and specifies the volume group"
                               " name (cluster-wide) for disk allocation"
                               " [%s]" % constants.DEFAULT_VG),
                         metavar="VG", default=None)

YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
                          help="Destroy cluster", action="store_true")

NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
                          help="Skip node agreement check (dangerous)",
                          action="store_true", default=False)

MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
                            help="Specify the mac prefix for the instance IP"
                            " addresses, in the format XX:XX:XX",
                            metavar="PREFIX",
                            default=None)

MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
                               help="Specify the node interface (cluster-wide)"
                               " on which the master IP address will be added"
                               " (cluster init default: %s)" %
                               constants.DEFAULT_BRIDGE,
                               metavar="NETDEV",
                               default=None)

MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
                                help="Specify the netmask of the master IP",
                                metavar="NETMASK",
                                default=None)

USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
                                     dest="use_external_mip_script",
                                     help="Specify whether to run a"
                                     " user-provided script for the master"
                                     " IP address turnup and"
                                     " turndown operations",
                                     type="bool", metavar=_YORNO, default=None)

# Storage directory options; defaults come from pathutils.
GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                                help="Specify the default directory (cluster-"
                                "wide) for storing the file-based disks [%s]" %
                                pathutils.DEFAULT_FILE_STORAGE_DIR,
                                metavar="DIR",
                                default=None)

GLOBAL_SHARED_FILEDIR_OPT = cli_option(
  "--shared-file-storage-dir",
  dest="shared_file_storage_dir",
  help="Specify the default directory (cluster-wide) for storing the"
  " shared file-based disks [%s]" %
  pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR,
  metavar="SHAREDDIR", default=None)

GLOBAL_GLUSTER_FILEDIR_OPT = cli_option(
  "--gluster-storage-dir",
  dest="gluster_storage_dir",
  help="Specify the default directory (cluster-wide) for mounting Gluster"
  " file systems [%s]" %
  pathutils.DEFAULT_GLUSTER_STORAGE_DIR,
  metavar="GLUSTERDIR",
  default=pathutils.DEFAULT_GLUSTER_STORAGE_DIR)

NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify %s" % pathutils.ETC_HOSTS,
                                   action="store_false", default=True)

MODIFY_ETCHOSTS_OPT = \
  cli_option("--modify-etc-hosts", dest="modify_etc_hosts", metavar=_YORNO,
             default=None, type="bool",
             help="Defines whether the cluster should autonomously modify"
             " and keep in sync the /etc/hosts file of the nodes")

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)

REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
                             help="Type of reboot: soft/hard/full",
                             default=constants.INSTANCE_REBOOT_HARD,
                             metavar="<REBOOT>",
                             choices=list(constants.REBOOT_TYPES))

IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

COMPRESS_OPT = cli_option("--compress", dest="compress",
                          type="string", default=constants.IEC_NONE,
                          help="The compression mode to use")

TRANSPORT_COMPRESSION_OPT = \
  cli_option("--transport-compression", dest="transport_compression",
             type="string", default=constants.IEC_NONE,
             help="The compression mode to use during transport")

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                                  dest="shutdown_timeout", type="int",
                                  default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                                  help="Maximum time to wait for instance"
                                  " shutdown")
1476
# Repetition interval for commands which can be re-run periodically.
# FIX: corrected the typo "repetions" in the help text.
INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
                          default=None,
                          help=("Number of seconds between repetitions of the"
                                " command"))
1481
EARLY_RELEASE_OPT = cli_option("--early-release",
                               dest="early_release", default=False,
                               action="store_true",
                               help="Release the locks on the secondary"
                               " node(s) early")

# Certificate and key renewal options (cluster/RAPI/SPICE/confd).
NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")

NEW_NODE_CERT_OPT = cli_option(
  "--new-node-certificates", dest="new_node_cert", default=False,
  action="store_true", help="Generate new node certificates (for all nodes)")

RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
                           default=None,
                           help="File containing new RAPI certificate")

NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
                               default=None, action="store_true",
                               help=("Generate a new self-signed RAPI"
                                     " certificate"))

SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
                            default=None,
                            help="File containing new SPICE certificate")

SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
                              default=None,
                              help="File containing the certificate of the CA"
                              " which signed the SPICE certificate")

NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
                                dest="new_spice_cert", default=None,
                                action="store_true",
                                help=("Generate a new self-signed SPICE"
                                      " certificate"))

NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
                                    dest="new_confd_hmac_key",
                                    default=False, action="store_true",
                                    help=("Create a new HMAC key for %s" %
                                          constants.CONFD))
1526
# FIX: removed the duplicated word in the help text ("new new").
CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       help=("Load new cluster domain"
                                             " secret from file"))
1532
NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
                                           dest="new_cluster_domain_secret",
                                           default=False, action="store_true",
                                           help=("Create a new cluster domain"
                                                 " secret"))

USE_REPL_NET_OPT = cli_option("--use-replication-network",
                              dest="use_replication_network",
                              help="Whether to use the replication network"
                              " for talking to the nodes",
                              action="store_true", default=False)

MAINTAIN_NODE_HEALTH_OPT = \
  cli_option("--maintain-node-health", dest="maintain_node_health",
             metavar=_YORNO, default=None, type="bool",
             help="Configure the cluster to automatically maintain node"
             " health, by shutting down unknown instances, shutting down"
             " unknown DRBD devices, etc.")

IDENTIFY_DEFAULTS_OPT = \
  cli_option("--identify-defaults", dest="identify_defaults",
             default=False, action="store_true",
             help="Identify which saved instance parameters are equal to"
             " the current cluster defaults and set them as such, instead"
             " of marking them as overridden")

# User-id pool management options.
UIDPOOL_OPT = cli_option("--uid-pool", default=None,
                         action="store", dest="uid_pool",
                         help=("A list of user-ids or user-id"
                               " ranges separated by commas"))

ADD_UIDS_OPT = cli_option("--add-uids", default=None,
                          action="store", dest="add_uids",
                          help=("A list of user-ids or user-id"
                                " ranges separated by commas, to be"
                                " added to the user-id pool"))

REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
                             action="store", dest="remove_uids",
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))

RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                              action="store", dest="reserved_lvs",
                              help=("A comma-separated list of reserved"
                                    " logical volumes names, that will be"
                                    " ignored by cluster verify"))

ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use roman numbers for positive integers")

DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

PRIMARY_IP_VERSION_OPT = \
  cli_option("--primary-ip-version", default=constants.IP4_VERSION,
             action="store", dest="primary_ip_version",
             metavar="%d|%d" % (constants.IP4_VERSION,
                                constants.IP6_VERSION),
             help="Cluster-wide IP version for primary IP")

SHOW_MACHINE_OPT = cli_option("-M", "--show-machine-names", default=False,
                              action="store_true",
                              help="Show machine name for every line in output")

FAILURE_ONLY_OPT = cli_option("--failure-only", default=False,
                              action="store_true",
                              help=("Hide successful results and show failures"
                                    " only (determined by the exit code)"))

REASON_OPT = cli_option("--reason", default=None,
                        help="The reason for executing the command")
1609
1610
def _PriorityOptionCb(option, _, value, parser):
  """Callback for processing C{--priority} option.

  Translates the symbolic priority name chosen on the command line
  into its numeric value and stores it on the parsed options.

  """
  setattr(parser.values, option.dest, _PRIONAME_TO_VALUE[value])
1618
1619
# Maps symbolic priority names to numeric values via _PriorityOptionCb.
PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          action="callback", type="choice",
                          callback=_PriorityOptionCb,
                          help="Priority for opcode processing")

OPPORTUNISTIC_OPT = cli_option("--opportunistic-locking",
                               dest="opportunistic_locking",
                               action="store_true", default=False,
                               help="Opportunistically acquire locks")

HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")

PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
                                     type="bool", metavar=_YORNO,
                                     dest="prealloc_wipe_disks",
                                     help=("Wipe disks prior to instance"
                                           " creation"))

NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
                             type="keyval", default=None,
                             help="Node parameters")

ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
                              action="store", metavar="POLICY", default=None,
                              help="Allocation policy for the node group")

NODE_POWERED_OPT = cli_option("--node-powered", default=None,
                              type="bool", metavar=_YORNO,
                              dest="node_powered",
                              help="Specify if the SoR for node is powered")

OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
                             default=constants.OOB_TIMEOUT,
                             help="Maximum time to wait for out-of-band helper")

POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
                             default=constants.OOB_POWER_DELAY,
                             help="Time in seconds to wait between power-ons")

FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
                              action="store_true", default=False,
                              help=("Whether command argument should be treated"
                                    " as filter"))

NO_REMEMBER_OPT = cli_option("--no-remember",
                             dest="no_remember",
                             action="store_true", default=False,
                             help="Perform but do not record the change"
                             " in the configuration")

PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
                              default=False, action="store_true",
                              help="Evacuate primary instances only")

SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
                                default=False, action="store_true",
                                help="Evacuate secondary instances only"
                                " (applies only to internally mirrored"
                                " disk templates, e.g. %s)" %
                                utils.CommaJoin(constants.DTS_INT_MIRROR))

STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
                                action="store_true", default=False,
                                help="Pause instance at startup")

TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
                          help="Destination node group (name or uuid)",
                          default=None, action="append",
                          completion_suggest=OPT_COMPL_ONE_NODEGROUP)

IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
                               action="append", dest="ignore_errors",
                               choices=list(constants.CV_ALL_ECODES_STRINGS),
                               help="Error code to be ignored")

DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
                            action="append",
                            help=("Specify disk state information in the"
                                  " format"
                                  " storage_type/identifier:option=value,...;"
                                  " note this is unused for now"),
                            type="identkeyval")

HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
                          action="append",
                          help=("Specify hypervisor state information in the"
                                " format hypervisor:option=value,...;"
                                " note this is unused for now"),
                          type="identkeyval")

IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
                                action="store_true", default=False,
                                help="Ignore instance policy violations")

RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
                             help="Sets the instance's runtime memory,"
                             " ballooning it up or down to the new value",
                             default=None, type="unit", metavar="<size>")

ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
                          action="store_true", default=False,
                          help="Marks the grow as absolute instead of the"
                          " (default) relative mode")

# IP network management options (IPv4 and IPv6 variants).
NETWORK_OPT = cli_option("--network",
                         action="store", default=None, dest="network",
                         help="IP network in CIDR notation")

GATEWAY_OPT = cli_option("--gateway",
                         action="store", default=None, dest="gateway",
                         help="IP address of the router (gateway)")

ADD_RESERVED_IPS_OPT = cli_option("--add-reserved-ips",
                                  action="store", default=None,
                                  dest="add_reserved_ips",
                                  help="Comma-separated list of"
                                  " reserved IPs to add")

REMOVE_RESERVED_IPS_OPT = cli_option("--remove-reserved-ips",
                                     action="store", default=None,
                                     dest="remove_reserved_ips",
                                     help="Comma-delimited list of"
                                     " reserved IPs to remove")

NETWORK6_OPT = cli_option("--network6",
                          action="store", default=None, dest="network6",
                          help="IP network in CIDR notation")

GATEWAY6_OPT = cli_option("--gateway6",
                          action="store", default=None, dest="gateway6",
                          help="IP6 address of the router (gateway)")

NOCONFLICTSCHECK_OPT = cli_option("--no-conflicts-check",
                                  dest="conflicts_check",
                                  default=True,
                                  action="store_false",
                                  help="Don't check for conflicting IPs")

INCLUDEDEFAULTS_OPT = cli_option("--include-defaults", dest="include_defaults",
                                 default=False, action="store_true",
                                 help="Include default values")

HOTPLUG_OPT = cli_option("--hotplug", dest="hotplug",
                         action="store_true", default=False,
                         help="Hotplug supported devices (NICs and Disks)")

HOTPLUG_IF_POSSIBLE_OPT = cli_option("--hotplug-if-possible",
                                     dest="hotplug_if_possible",
                                     action="store_true", default=False,
                                     help="Hotplug devices in case"
                                     " hotplug is supported")

INSTALL_IMAGE_OPT = \
  cli_option("--install-image",
             dest="install_image",
             action="store",
             type="string",
             default=None,
             help="The OS image to use for running the OS scripts safely")

INSTANCE_COMMUNICATION_OPT = \
  cli_option("-c", "--communication",
             dest="instance_communication",
             help=constants.INSTANCE_COMMUNICATION_DOC,
             type="bool")

INSTANCE_COMMUNICATION_NETWORK_OPT = \
  cli_option("--instance-communication-network",
             dest="instance_communication_network",
             type="string",
             help="Set the network name for instance communication")

# Disk zeroing options, used when exporting instances.
ZEROING_IMAGE_OPT = \
  cli_option("--zeroing-image",
             dest="zeroing_image", action="store", default=None,
             help="The OS image to use to zero instance disks")

ZERO_FREE_SPACE_OPT = \
  cli_option("--zero-free-space",
             dest="zero_free_space", action="store_true", default=False,
             help="Whether to zero the free space on the disks of the "
             "instance prior to the export")

HELPER_STARTUP_TIMEOUT_OPT = \
  cli_option("--helper-startup-timeout",
             dest="helper_startup_timeout", action="store", type="int",
             help="Startup timeout for the helper VM")

HELPER_SHUTDOWN_TIMEOUT_OPT = \
  cli_option("--helper-shutdown-timeout",
             dest="helper_shutdown_timeout", action="store", type="int",
             help="Shutdown timeout for the helper VM")

ZEROING_TIMEOUT_FIXED_OPT = \
  cli_option("--zeroing-timeout-fixed",
             dest="zeroing_timeout_fixed", action="store", type="int",
             help="The fixed amount of time to wait before assuming that the "
             "zeroing failed")

ZEROING_TIMEOUT_PER_MIB_OPT = \
  cli_option("--zeroing-timeout-per-mib",
             dest="zeroing_timeout_per_mib", action="store", type="float",
             help="The amount of time to wait per MiB of data to zero, in "
             "addition to the fixed timeout")

#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT, REASON_OPT]

# options related to asynchronous job handling

SUBMIT_OPTS = [
  SUBMIT_OPT,
  PRINT_JOBID_OPT,
  ]

# common options for creating instances. add and import then add their own
# specific ones.
COMMON_CREATE_OPTS = [
  BACKEND_OPT,
  DISK_OPT,
  DISK_TEMPLATE_OPT,
  FILESTORE_DIR_OPT,
  FILESTORE_DRIVER_OPT,
  HYPERVISOR_OPT,
  IALLOCATOR_OPT,
  NET_OPT,
  NODE_PLACEMENT_OPT,
  NOIPCHECK_OPT,
  NOCONFLICTSCHECK_OPT,
  NONAMECHECK_OPT,
  NONICS_OPT,
  NWSYNC_OPT,
  OSPARAMS_OPT,
  OSPARAMS_PRIVATE_OPT,
  OSPARAMS_SECRET_OPT,
  OS_SIZE_OPT,
  OPPORTUNISTIC_OPT,
  SUBMIT_OPT,
  PRINT_JOBID_OPT,
  TAG_ADD_OPT,
  DRY_RUN_OPT,
  PRIORITY_OPT,
  ]

# common instance policy options
INSTANCE_POLICY_OPTS = [
  IPOLICY_BOUNDS_SPECS_OPT,
  IPOLICY_DISK_TEMPLATES,
  IPOLICY_VCPU_RATIO,
  IPOLICY_SPINDLE_RATIO,
  ]

# instance policy split specs options
SPLIT_ISPECS_OPTS = [
  SPECS_CPU_COUNT_OPT,
  SPECS_DISK_COUNT_OPT,
  SPECS_DISK_SIZE_OPT,
  SPECS_MEM_SIZE_OPT,
  SPECS_NIC_COUNT_OPT,
  ]
1888
1889
1890 class _ShowUsage(Exception):
1891 """Exception class for L{_ParseArgs}.
1892
1893 """
1894 def __init__(self, exit_error):
1895 """Initializes instances of this class.
1896
1897 @type exit_error: bool
1898 @param exit_error: Whether to report failure on exit
1899
1900 """
1901 Exception.__init__(self)
1902 self.exit_error = exit_error
1903
1904
class _ShowVersion(Exception):
  """Exception class for L{_ParseArgs}.

  Signals that version information should be printed.

  """
1909
1910
def _ParseArgs(binary, argv, commands, aliases, env_override):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param binary: Script name
  @param argv: Command line arguments
  @param commands: Dictionary containing command definitions
  @param aliases: dictionary with command aliases {"alias": "target", ...}
  @param env_override: list of env variables allowed for default args
  @raise _ShowUsage: If usage description should be shown
  @raise _ShowVersion: If version should be shown

  """
  assert not (env_override - set(commands))
  assert not (set(aliases.keys()) & set(commands.keys()))

  # Without any argument there is nothing to do
  if len(argv) <= 1:
    raise _ShowUsage(exit_error=True)

  cmd = argv[1]

  if cmd == "--version":
    raise _ShowVersion()
  if cmd == "--help":
    raise _ShowUsage(exit_error=False)
  if cmd not in commands and cmd not in aliases:
    raise _ShowUsage(exit_error=True)

  # Resolve aliases to their target command
  if cmd in aliases:
    target = aliases[cmd]
    if target not in commands:
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                   " command '%s'" % (cmd, target))
    cmd = target

  # Allow per-command default arguments via environment variables,
  # inserted just after the command name
  if cmd in env_override:
    args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
    env_args = os.environ.get(args_env_name)
    if env_args:
      argv = utils.InsertAtPos(argv, 2, shlex.split(env_args))

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args(args=argv[2:])

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args
1968
1969
1970 def _FormatUsage(binary, commands):
1971 """Generates a nice description of all commands.
1972
1973 @param binary: Script name
1974 @param commands: Dictionary containing command definitions
1975
1976 """
1977 # compute the max line length for cmd + usage
1978 mlen = min(60, max(map(len, commands)))
1979
1980 yield "Usage: %s {command} [options...] [argument...]" % binary
1981 yield "%s <command> --help to see details, or man %s" % (binary, binary)
1982 yield ""
1983 yield "Commands:"
1984
1985 # and format a nice command list
1986 for (cmd, (_, _, _, _, help_text)) in sorted(commands.items()):
1987 help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
1988 yield " %-*s - %s" % (mlen, cmd, help_lines.pop(0))
1989 for line in help_lines:
1990 yield " %-*s %s" % (mlen, "", line)
1991
1992 yield ""
1993
1994
1995 def _CheckArguments(cmd, args_def, args):
1996 """Verifies the arguments using the argument definition.
1997
1998 Algorithm:
1999
2000 1. Abort with error if values specified by user but none expected.
2001
2002 1. For each argument in definition
2003
2004 1. Keep running count of minimum number of values (min_count)
2005 1. Keep running count of maximum number of values (max_count)
2006 1. If it has an unlimited number of values
2007
2008 1. Abort with error if it's not the last argument in the definition
2009
2010 1. If last argument has limited number of values
2011
2012 1. Abort with error if number of values doesn't match or is too large
2013
2014 1. Abort with error if user didn't pass enough values (min_count)
2015
2016 """
2017 if args and not args_def:
2018 ToStderr("Error: Command %s expects no arguments", cmd)
2019 return False
2020
2021 min_count = None
2022 max_count = None
2023 check_max = None
2024
2025 last_idx = len(args_def) - 1
2026
2027 for idx, arg in enumerate(args_def):
2028 if min_count is None:
2029 min_count = arg.min
2030 elif arg.min is not None:
2031 min_count += arg.min
2032
2033 if max_count is None:
2034 max_count = arg.max
2035 elif arg.max is not None:
2036 max_count += arg.max
2037
2038 if idx == last_idx:
2039 check_max = (arg.max is not None)
2040
2041 elif arg.max is None:
2042 raise errors.ProgrammerError("Only the last argument can have max=None")
2043
2044 if check_max:
2045 # Command with exact number of arguments
2046 if (min_count is not None and max_count is not None and
2047 min_count == max_count and len(args) != min_count):
2048 ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
2049 return False
2050
2051 # Command with limited number of arguments
2052 if max_count is not None and len(args) > max_count:
2053 ToStderr("Error: Command %s expects only %d argument(s)",
2054 cmd, max_count)
2055 return False
2056
2057 # Command with some required arguments
2058 if min_count is not None and len(args) < min_count:
2059 ToStderr("Error: Command %s expects at least %d argument(s)",
2060 cmd, min_count)
2061 return False
2062
2063 return True
2064
2065
def SplitNodeOption(value):
  """Splits the value of a --node option.

  A value of the form "pnode:snode" is split at the first colon; any
  other value is returned unchanged with C{None} as the second element.

  """
  if not value or ":" not in value:
    return (value, None)
  return value.split(":", 1)
2074
2075
def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  if not os_variants:
    # No variants: the base name is the only valid one
    return [os_name]
  return ["%s+%s" % (os_name, variant) for variant in os_variants]
2091
2092
def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields
  @return: the default fields when nothing was selected, the selected
      fields alone, or (with a leading "+") appended to the defaults

  """
  if selected is None:
    return default

  if not selected.startswith("+"):
    return selected.split(",")

  # Leading "+" means: extend the defaults instead of replacing them
  return default + selected[1:].split(",")
2109
2110
# Alias: decorator running the wrapped function within an initialized RPC
# context (see L{rpc.RunWithRPC})
UsesRPC = rpc.RunWithRPC
2112
2113
def AskUser(text, choices=None):
  """Ask the user a question on the controlling terminal.

  @param text: the question to ask

  @param choices: list with elements tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do not perform the
      operation')]; note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty), we return the last
      entry from the list

  """
  if choices is None:
    choices = [("y", True, "Perform the operation"),
               ("n", False, "Do not perform the operation")]
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  # The last choice doubles as the default answer, returned when no tty
  # is available or the user gives no recognized input before EOF
  answer = choices[-1][1]
  new_text = []
  for line in text.splitlines():
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)
  try:
    # NOTE: "file" is the Python 2 built-in; the controlling terminal is
    # opened directly so the prompt works even with redirected stdio
    f = file("/dev/tty", "a+")
  except IOError:
    # No controlling terminal: fall back to the default answer
    return answer
  try:
    chars = [entry[0] for entry in choices]
    chars[-1] = "[%s]" % chars[-1]
    chars.append("?")
    maps = dict([(entry[0], entry[1]) for entry in choices])
    while True:
      f.write(text)
      f.write("\n")
      f.write("/".join(chars))
      f.write(": ")
      # Read at most two characters: the answer character plus the newline
      line = f.readline(2).strip().lower()
      if line in maps:
        answer = maps[line]
        break
      elif line == "?":
        # Print the per-choice descriptions, then re-prompt
        for entry in choices:
          f.write(" %s - %s\n" % (entry[0], entry[2]))
        f.write("\n")
        continue
  finally:
    f.close()
  return answer
2169
2170
class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  This exception has one argument, the ID of the job that was
  submitted. The handler should print this ID.

  This is not an error, just a structured way to exit from clients.

  """
2180
2181
def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.

  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
      if None, a new client will be created
  @return: the ID of the submitted job

  """
  client = cl if cl is not None else GetClient()
  return client.SubmitJob(ops)
2198
2199
def GenericPollJob(job_id, cbs, report_cbs):
  """Generic job-polling function.

  @type job_id: number
  @param job_id: Job ID
  @type cbs: Instance of L{JobPollCbBase}
  @param cbs: Data callbacks
  @type report_cbs: Instance of L{JobPollReportCbBase}
  @param report_cbs: Reporting callbacks
  @return: the opresult list of the job, if it ended successfully
  @raise errors.JobLost: if the job cannot be found
  @raise errors.OpExecError: if the job was canceled or any opcode failed

  """
  prev_job_info = None
  prev_logmsg_serial = None

  status = None

  while True:
    # Returns a false value if the job is gone, JOB_NOTCHANGED on
    # timeout, or a (job_info, log_entries) tuple otherwise
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
                                      prev_logmsg_serial)
    if not result:
      # job not found, go away!
      raise errors.JobLost("Job with id %s lost" % job_id)

    if result == constants.JOB_NOTCHANGED:
      report_cbs.ReportNotChanged(job_id, status)

      # Wait again
      continue

    # Split result, a tuple of (field values, log entries)
    (job_info, log_entries) = result
    (status, ) = job_info

    if log_entries:
      for log_entry in log_entries:
        (serial, timestamp, log_type, message) = log_entry
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
                                    log_type, message)
        # NOTE: relies on Python 2 semantics where max(None, n) == n for
        # the very first message (prev_logmsg_serial starts as None)
        prev_logmsg_serial = max(prev_logmsg_serial, serial)

    # TODO: Handle canceled and archived jobs
    elif status in (constants.JOB_STATUS_SUCCESS,
                    constants.JOB_STATUS_ERROR,
                    constants.JOB_STATUS_CANCELING,
                    constants.JOB_STATUS_CANCELED):
      # Only leave the loop once the job is finished AND no new log
      # entries arrived with this update
      break

    prev_job_info = job_info

  # Fetch the final state to build the return value or error message
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
  if not jobs:
    raise errors.JobLost("Job with id %s lost" % job_id)

  status, opstatus, result = jobs[0]

  if status == constants.JOB_STATUS_SUCCESS:
    return result

  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
    raise errors.OpExecError("Job was canceled")

  # The job failed: find the first failed opcode and report it, noting
  # whether any earlier opcode had already succeeded
  has_ok = False
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
    if status == constants.OP_STATUS_SUCCESS:
      has_ok = True
    elif status == constants.OP_STATUS_ERROR:
      errors.MaybeRaise(msg)

      if has_ok:
        raise errors.OpExecError("partial failure (opcode %d): %s" %
                                 (idx, msg))

      raise errors.OpExecError(str(msg))

  # default failure mode
  raise errors.OpExecError(result)
2276
2277
class JobPollCbBase(object):
  """Base class for L{GenericPollJob} callbacks.

  """
  def __init__(self):
    """Initializes this class.

    """

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    @return: a false value if the job can no longer be found,
        C{constants.JOB_NOTCHANGED} on timeout, or a
        (job_info, log_entries) tuple -- see L{GenericPollJob} for how
        the result is consumed

    """
    raise NotImplementedError()

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields

    """
    raise NotImplementedError()
2304
2305
class JobPollReportCbBase(object):
  """Base class for L{GenericPollJob} reporting callbacks.

  """
  def __init__(self):
    """Initializes this class.

    """

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    raise NotImplementedError()

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    """
    raise NotImplementedError()
2331
2332
class _LuxiJobPollCb(JobPollCbBase):
  """L{JobPollCbBase} implementation delegating to a luxi client.

  """
  def __init__(self, cl):
    """Initializes this class.

    @param cl: luxi client used for all queries

    """
    JobPollCbBase.__init__(self)
    self.cl = cl

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    return self.cl.WaitForJobChangeOnce(job_id, fields,
                                        prev_job_info, prev_log_serial)

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    """
    return self.cl.QueryJobs(job_ids, fields)
2354
2355
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
  """Job poll reporter forwarding log messages to a feedback function.

  """
  def __init__(self, feedback_fn):
    """Initializes this class.

    @param feedback_fn: Callable receiving one (timestamp, log_type,
        log_msg) tuple per job log message

    """
    JobPollReportCbBase.__init__(self)

    self.feedback_fn = feedback_fn

    assert callable(feedback_fn)

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Forwards a log message to the feedback function.

    """
    entry = (timestamp, log_type, log_msg)
    self.feedback_fn(entry)

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    # Stalled jobs are intentionally not reported through the feedback
    # function
2378
2379
class StdioJobPollReportCb(JobPollReportCbBase):
  """Job poll reporter printing messages to standard output/error.

  """
  def __init__(self):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    # Each "stuck" notification is printed at most once per job
    self.notified_queued = False
    self.notified_waitlock = False

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Prints a job log message to stdout, prefixed with its timestamp.

    """
    when = time.ctime(utils.MergeTime(timestamp))
    what = FormatLogMessage(log_type, log_msg)
    ToStdout("%s %s", when, what)

  def ReportNotChanged(self, job_id, status):
    """Prints a one-time notice when a job is stuck queued or waiting.

    """
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
      ToStderr("Job %s is waiting in queue", job_id)
      self.notified_queued = True
    elif (status == constants.JOB_STATUS_WAITING and
          not self.notified_waitlock):
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
      self.notified_waitlock = True
2411
2412
def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  Entries that are not plain messages are converted to their string
  representation first; the result is always safely encoded.

  """
  if log_type == constants.ELOG_MESSAGE:
    text = log_msg
  else:
    text = str(log_msg)

  return utils.SafeEncode(text)
2421
2422
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
  """Function to poll for the result of a job.

  @type job_id: job identified
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
      if None, a new client will be created
  @param feedback_fn: if given, log messages are passed to it (mutually
      exclusive with C{reporter})
  @param reporter: L{JobPollReportCbBase} instance to use; defaults to
      stdio reporting
  @raise errors.ProgrammerError: if both reporter and feedback_fn are given

  """
  if cl is None:
    cl = GetClient()

  if reporter is not None:
    if feedback_fn:
      raise errors.ProgrammerError("Can't specify reporter and feedback"
                                   " function")
  elif feedback_fn:
    reporter = FeedbackFnJobPollReportCb(feedback_fn)
  else:
    reporter = StdioJobPollReportCb()

  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
2445
2446
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
  """Legacy function to submit an opcode.

  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.

  @return: the result of the opcode (first and only element of the job
      result list)

  """
  if cl is None:
    cl = GetClient()

  SetGenericOpcodeOpts([op], opts)

  job_id = SendJob([op], cl=cl)
  if getattr(opts, "print_jobid", False):
    ToStdout("%d" % job_id)

  results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
                    reporter=reporter)

  return results[0]
2468
2469
def SubmitOpCodeToDrainedQueue(op):
  """Forcefully insert a job in the queue, even if it is drained.

  @return: the result of the opcode

  """
  cl = GetClient()
  job_id = cl.SubmitJobToDrainedQueue([op])
  return PollJob(job_id, cl=cl)[0]
2478
2479
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending the job via
  L{SendJob} (otherwise L{SubmitOpCode} does it).

  @raise JobSubmittedException: if the job was only sent, not waited for

  """
  if not (opts and opts.submit_only):
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)

  job = [op]
  SetGenericOpcodeOpts(job, opts)

  job_id = SendJob(job, cl=cl)
  if opts.print_jobid:
    ToStdout("%d" % job_id)

  # Structured exit: the caller prints the job ID and terminates
  raise JobSubmittedException(job_id)
2501
2502
def _InitReasonTrail(op, opts):
  """Builds the first part of the reason trail

  Builds the initial part of the reason trail, adding the user provided
  reason (if it exists) and the name of the command starting the
  operation.

  @param op: the opcode the reason trail will be added to
  @param opts: the command line options selected by the user

  """
  assert len(sys.argv) >= 2

  trail = []

  # The user-supplied reason, if any, comes first
  if opts.reason:
    trail.append((constants.OPCODE_REASON_SRC_USER,
                  opts.reason,
                  utils.EpochNano()))

  # Followed by an entry identifying the client binary and command
  binary = os.path.basename(sys.argv[0])
  source = "%s:%s" % (constants.OPCODE_REASON_SRC_CLIENT, binary)
  trail.append((source, sys.argv[1], utils.EpochNano()))

  op.reason = trail
2526
2527
def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  if not options:
    return

  for op in opcode_list:
    op.debug_level = options.debug
    if hasattr(options, "dry_run"):
      op.dry_run = options.dry_run

    priority = getattr(options, "priority", None)
    if priority is not None:
      op.priority = priority

    _InitReasonTrail(op, options)
2548
2549
def FormatError(err):
  """Return a formatted error message for a given error.

  This function takes an exception instance and returns a tuple
  consisting of two values: first, the recommended exit code, and
  second, a string describing the error message (not
  newline-terminated).

  @type err: Exception
  @param err: the exception to format
  @rtype: (int, string)
  @return: (exit code, message); the exit code is 2 for configuration
      corruption, 0 for L{JobSubmittedException} and 1 otherwise

  """
  retcode = 1
  obuf = StringIO()
  msg = str(err)
  # The isinstance chain goes from most to least specific; the generic
  # errors.GenericError case must stay near the end
  if isinstance(err, errors.ConfigurationError):
    txt = "Corrupt configuration file: %s" % msg
    logging.error(txt)
    obuf.write(txt + "\n")
    obuf.write("Aborting.")
    retcode = 2
  elif isinstance(err, errors.HooksAbort):
    obuf.write("Failure: hooks execution failed:\n")
    for node, script, out in err.args[0]:
      if out:
        obuf.write(" node: %s, script: %s, output: %s\n" %
                   (node, script, out))
      else:
        obuf.write(" node: %s, script: %s (no output)\n" %
                   (node, script))
  elif isinstance(err, errors.HooksFailure):
    obuf.write("Failure: hooks general failure: %s" % msg)
  elif isinstance(err, errors.ResolverError):
    this_host = netutils.Hostname.GetSysName()
    if err.args[0] == this_host:
      msg = "Failure: can't resolve my own hostname ('%s')"
    else:
      msg = "Failure: can't resolve hostname '%s'"
    obuf.write(msg % err.args[0])
  elif isinstance(err, errors.OpPrereqError):
    if len(err.args) == 2:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\nerror type: %s, error details:\n%s" %
                 (err.args[1], err.args[0]))
    else:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\n%s" % msg)
  elif isinstance(err, errors.OpExecError):
    obuf.write("Failure: command execution error:\n%s" % msg)
  elif isinstance(err, errors.TagError):
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
  elif isinstance(err, errors.JobQueueDrainError):
    obuf.write("Failure: the job queue is marked for drain and doesn't"
               " accept new requests\n")
  elif isinstance(err, errors.JobQueueFull):
    obuf.write("Failure: the job queue is full and doesn't accept new"
               " job submissions until old jobs are archived\n")
  elif isinstance(err, errors.TypeEnforcementError):
    obuf.write("Parameter Error: %s" % msg)
  elif isinstance(err, errors.ParameterError):
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
  elif isinstance(err, rpcerr.NoMasterError):
    # Identify which daemon was unreachable by the socket path
    if err.args[0] == pathutils.MASTER_SOCKET:
      daemon = "the master daemon"
    elif err.args[0] == pathutils.QUERY_SOCKET:
      daemon = "the config daemon"
    else:
      daemon = "socket '%s'" % str(err.args[0])
    obuf.write("Cannot communicate with %s.\nIs the process running"
               " and listening for connections?" % daemon)
  elif isinstance(err, rpcerr.TimeoutError):
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
               " been submitted and will continue to run even if the call"
               " timed out. Useful commands in this situation are \"gnt-job"
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
    obuf.write(msg)
  elif isinstance(err, rpcerr.PermissionError):
    obuf.write("It seems you don't have permissions to connect to the"
               " master daemon.\nPlease retry as a different user.")
  elif isinstance(err, rpcerr.ProtocolError):
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
               "%s" % msg)
  elif isinstance(err, errors.JobLost):
    obuf.write("Error checking job status: %s" % msg)
  elif isinstance(err, errors.QueryFilterParseError):
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
    obuf.write("\n".join(err.GetDetails()))
  elif isinstance(err, errors.GenericError):
    obuf.write("Unhandled Ganeti error: %s" % msg)
  elif isinstance(err, JobSubmittedException):
    # Not an error: the job was submitted successfully
    obuf.write("JobID: %s\n" % err.args[0])
    retcode = 0
  else:
    obuf.write("Unhandled exception: %s" % msg)
  return retcode, obuf.getvalue().rstrip("\n")
2642
2643
2644 def GenericMain(commands, override=None, aliases=None,
2645 env_override=frozenset()):
2646 """Generic main function for all the gnt-* commands.
2647
2648 @param commands: a dictionary with a special structure, see the design doc
2649 for command line handling.
2650 @param override: if not None, we expect a dictionary with keys that will
2651 override command line options; this can be used to pass
2652 options from the scripts to generic functions
2653 @param aliases: dictionary with command aliases {'alias': 'target, ...}
2654 @param env_override: list of environment names which are allowed to submit
2655 default args for commands
2656
2657 """
2658 # save the program name and the entire command line for later logging
2659 if sys.argv:
2660 binary = os.path.basename(sys.argv[0])
2661 if not binary:
2662 binary = sys.argv[0]
2663
2664 if len(sys.argv) >= 2:
2665 logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
2666 else:
2667 logname = binary
2668
2669 cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
2670 else:
2671 binary = "<unknown program>"
2672 cmdline = "<unknown>"
2673
2674 if aliases is None:
2675 aliases = {}
2676
2677 try:
2678 (func, options, args) = _ParseArgs(binary, sys.argv, commands, aliases,
2679 env_override)
2680 except _ShowVersion:
2681 ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
2682 constants.RELEASE_VERSION)
2683 return constants.EXIT_SUCCESS
2684 except _ShowUsage, err:
2685 for line in _FormatUsage(binary, commands):
2686 ToStdout(line)
2687
2688 if err.exit_error:
2689 return constants.EXIT_FAILURE
2690 else:
2691 return constants.EXIT_SUCCESS
2692 except errors.ParameterError, err:
2693 result, err_msg = FormatError(err)
2694 ToStderr(err_msg)
2695 return 1
2696
2697 if func is None: # parse error
2698 return 1
2699
2700 if override is not None:
2701 for key, val in override.iteritems():
2702 setattr(options, key, val)
2703
2704 utils.SetupLogging(pathutils.LOG_COMMANDS, logname, debug=options.debug,
2705 stderr_logging=True)
2706
2707 logging.debug("Command line: %s", cmdline)
2708
2709 try:
2710 result = func(options, args)
2711 except (errors.GenericError, rpcerr.ProtocolError,
2712 JobSubmittedException), err:
2713 result, err_msg = FormatError(err)
2714 logging.exception("Error during command processing")
2715 ToStderr(err_msg)
2716 except KeyboardInterrupt:
2717 result = constants.EXIT_FAILURE
2718 ToStderr("Aborted. Note that if the operation created any jobs, they"
2719 " might have been submitted and"
2720 " will continue to run in the background.")
2721 except IOError, err:
2722 if err.errno == errno.EPIPE:
2723 # our terminal went away, we'll exit
2724 sys.exit(constants.EXIT_FAILURE)
2725 else:
2726 raise
2727
2728 return result
2729
2730
2731 def ParseNicOption(optvalue):
2732 """Parses the value of the --net option(s).
2733
2734 """
2735 try:
2736 nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
2737 except (TypeError, ValueError), err:
2738 raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err),
2739 errors.ECODE_INVAL)
2740
2741 nics = [{}] * nic_max
2742 for nidx, ndict in optvalue:
2743 nidx = int(nidx)
2744
2745 if not isinstance(ndict, dict):
2746 raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
2747 " got %s" % (nidx, ndict), errors.ECODE_INVAL)
2748
2749 utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
2750
2751 nics[nidx] = ndict
2752
2753 return nics
2754
2755
def FixHvParams(hvparams):
  """Normalizes the separator of the usb_devices hvparam to commas.

  In Ganeti 2.8.4 the separator for the usb_devices hvparam was changed
  from comma to space, because commas cannot be accepted on the command
  line (they already act as the separator between different hvparams).
  Still, RAPI should be able to accept commas for backwards
  compatibility.  Therefore spaces are converted into commas here, and
  the old parsing logic is kept everywhere else.

  """
  # If usb_devices is absent, no modification is required
  if constants.HV_USB_DEVICES in hvparams:
    hvparams[constants.HV_USB_DEVICES] = \
      hvparams[constants.HV_USB_DEVICES].replace(" ", ",")
2769
2770
2771 def GenericInstanceCreate(mode, opts, args):
2772 """Add an instance to the cluster via either creation or import.
2773
2774 @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
2775 @param opts: the command line options selected by the user
2776 @type args: list
2777 @param args: should contain only one element, the new instance name
2778 @rtype: int
2779 @return: the desired exit code
2780
2781 """
2782 instance = args[0]
2783
2784 (pnode, snode) = SplitNodeOption(opts.node)
2785
2786 hypervisor = None
2787 hvparams = {}
2788 if opts.hypervisor:
2789 hypervisor, hvparams = opts.hypervisor
2790
2791 if opts.nics:
2792 nics = ParseNicOption(opts.nics)
2793 elif opts.no_nics:
2794 # no nics
2795 nics = []
2796 elif mode == constants.INSTANCE_CREATE:
2797 # default of one nic, all auto
2798 nics = [{}]
2799 else:
2800 # mode == import
2801 nics = []
2802
2803 if opts.disk_template == constants.DT_DISKLESS:
2804 if opts.disks or opts.sd_size is not None:
2805 raise errors.OpPrereqError("Diskless instance but disk"
2806 " information passed", errors.ECODE_INVAL)
2807 disks = []
2808 else:
2809 if (not opts.disks and not opts.sd_size
2810 and mode == constants.INSTANCE_CREATE):
2811 raise errors.OpPrereqError("No disk information specified",
2812 errors.ECODE_INVAL)
2813 if opts.disks and opts.sd_size is not None:
2814 raise errors.OpPrereqError("Please use either the '--disk' or"
2815 " '-s' option", errors.ECODE_INVAL)
2816 if opts.sd_size is not None:
2817 opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]
2818
2819 if opts.disks:
2820 try:
2821 disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
2822 except ValueError, err:
2823 raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err),
2824 errors.ECODE_INVAL)
2825 disks = [{}] * disk_max
2826 else:
2827 disks = []
2828 for didx, ddict in opts.disks:
2829 didx = int(didx)
2830 if not isinstance(ddict, dict):
2831 msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
2832 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
2833 elif constants.IDISK_SIZE in ddict:
2834 if constants.IDISK_ADOPT in ddict:
2835 raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
2836 " (disk %d)" % didx, errors.ECODE_INVAL)
2837 try:
2838 ddict[constants.IDISK_SIZE] = \
2839 utils.ParseUnit(ddict[constants.IDISK_SIZE])
2840 except ValueError, err:
2841 raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
2842 (didx, err), errors.ECODE_INVAL)
2843 elif constants.IDISK_ADOPT in ddict:
2844 if constants.IDISK_SPINDLES in ddict:
2845 raise errors.OpPrereqError("spindles is not a valid option when"
2846 " adopting a disk", errors.ECODE_INVAL)
2847 if mode == constants.INSTANCE_IMPORT:
2848 raise errors.OpPrereqError("Disk adoption not allowed for instance"
2849 " import", errors.ECODE_INVAL)
2850 ddict[constants.IDISK_SIZE] = 0
2851 else:
2852 raise errors.OpPrereqError("Missing size or adoption source for"
2853 " disk %d" % didx, errors.ECODE_INVAL)
2854 if constants.IDISK_SPINDLES in ddict:
2855 ddict[constants.IDISK_SPINDLES] = int(ddict[constants.IDISK_SPINDLES])
2856
2857 disks[didx] = ddict
2858
2859 if opts.tags is not None:
2860 tags = opts.tags.split(",")
2861 else:
2862 tags = []
2863
2864 utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
2865 utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
2866 FixHvParams(hvparams)
2867
2868 osparams_private = opts.osparams_private or serializer.PrivateDict()
2869 osparams_secret = opts.osparams_secret or serializer.PrivateDict()
2870
2871 helper_startup_timeout = opts.helper_startup_timeout
2872 helper_shutdown_timeout = opts.helper_shutdown_timeout
2873
2874 if mode == constants.INSTANCE_CREATE:
2875 start = opts.start
2876 os_type = opts.os
2877 force_variant = opts.force_variant
2878 src_node = None
2879 src_path = None
2880 no_install = opts.no_install
2881 identify_defaults = False
2882 compress = constants.IEC_NONE
2883 if opts.instance_communication is None:
2884 instance_communication = False
2885 else:
2886 instance_communication = opts.instance_communication
2887 elif mode == constants.INSTANCE_IMPORT:
2888 start = False
2889 os_type = None
2890 force_variant = False
2891 src_node = opts.src_node
2892 src_path = opts.src_dir
2893 no_install = None
2894 identify_defaults = opts.identify_defaults
2895 compress = opts.compress
2896 instance_communication = False
2897 else:
2898 raise errors.ProgrammerError("Invalid creation mode %s" % mode)
2899
2900 op = opcodes.OpInstanceCreate(
2901 instance_name=instance,
2902 disks=disks,
2903 disk_template=opts.disk_template,
2904 nics=nics,
2905 conflicts_check=opts.conflicts_check,
2906 pnode=pnode, snode=snode,
2907 ip_check=opts.ip_check,
2908 name_check=opts.name_check,
2909 wait_for_sync=opts.wait_for_sync,
2910 file_storage_dir=opts.file_storage_dir,
2911 file_driver=opts.file_driver,
2912 iallocator=opts.iallocator,
2913 hypervisor=hypervisor,
2914 hvparams=hvparams,
2915 beparams=opts.beparams,
2916 osparams=opts.osparams,
2917 osparams_private=osparams_private,
2918 osparams_secret=osparams_secret,
2919 mode=mode,
2920 opportunistic_locking=opts.opportunistic_locking,
2921 start=start,
2922 os_type=os_type,
2923 force_variant=force_variant,
2924 src_node=src_node,
2925 src_path=src_path,
2926 compress=compress,
2927 tags=tags,
2928 no_install=no_install,
2929 identify_defaults=identify_defaults,
2930 ignore_ipolicy=opts.ignore_ipolicy,
2931 instance_communication=instance_communication,
2932 helper_startup_timeout=helper_startup_timeout,
2933 helper_shutdown_timeout=helper_shutdown_timeout)
2934
2935 SubmitOrSend(op, opts)
2936 return 0
2937
2938
2939 class _RunWhileDaemonsStoppedHelper(object):
2940 """Helper class for L{RunWhileDaemonsStopped} to simplify state management
2941
2942 """
  def __init__(self, feedback_fn, cluster_name, master_node,
               online_nodes, ssh_ports, exclude_daemons):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: Feedback function
    @type cluster_name: string
    @param cluster_name: Cluster name
    @type master_node: string
    @param master_node: Master node name
    @type online_nodes: list
    @param online_nodes: List of names of online nodes
    @type ssh_ports: list
    @param ssh_ports: List of SSH ports of online nodes, in the same
        order as C{online_nodes}
    @type exclude_daemons: list of string
    @param exclude_daemons: list of daemons excluded from the shutdown
        (presumably restarted after all others are shut down -- TODO
        confirm against the caller)

    """
    self.feedback_fn = feedback_fn
    self.cluster_name = cluster_name
    self.master_node = master_node
    self.online_nodes = online_nodes
    # Maps node name -> SSH port, for running commands remotely
    self.ssh_ports = dict(zip(online_nodes, ssh_ports))

    self.ssh = ssh.SshRunner(self.cluster_name)

    # All online nodes except the master; commands for these go over SSH
    self.nonmaster_nodes = [name for name in online_nodes
                            if name != master_node]

    self.exclude_daemons = exclude_daemons

    assert self.master_node not in self.nonmaster_nodes
2977
2978 def _RunCmd(self, node_name, cmd):
2979 """Runs a command on the local or a remote machine.
2980
2981 @type node_name: string
2982 @param node_name: Machine name
2983 @type cmd: list
2984 @param cmd: Command
2985
2986 """
2987 if node_name is None or node_name == self.master_node:
2988 # No need to use SSH
2989 result = utils.RunCmd(cmd)
2990 else:
2991 result = self.ssh.Run(node_name, constants.SSH_LOGIN_USER,
2992 utils.ShellQuoteArgs(cmd),
2993 port=self.ssh_ports[node_name])
2994
2995 if result.failed:
2996 errmsg = ["Failed to run command %s" % result.cmd]
2997 if node_name:
2998 errmsg.append("on node %s" % node_name)
2999 errmsg.append(": exitcode %s and error %s" %
3000 (result.exit_code, result.output))
3001 raise errors.OpExecError(" ".join(errmsg))
3002
3003 def Call(self, fn, *args):
3004 """Call function while all daemons are stopped.
3005
3006 @type fn: callable
3007 @param fn: Function to be called
3008
3009 """
3010 # Pause watcher by acquiring an exclusive lock on watcher state file
3011 self.feedback_fn("Blocking watcher")
3012 watcher_block = utils.FileLock.Open(pathutils.WATCHER_LOCK_FILE)
3013 try:
3014 # TODO: Currently, this just blocks. There's no timeout.
3015 # TODO: Should it be a shared lock?
3016 watcher_block.Exclusive(blocking=True)
3017
3018 # Stop master daemons, so that no new jobs can come in and all running
3019 # ones are finished
3020 self.feedback_fn("Stopping master daemons")
3021 self._RunCmd(None, [pathutils.DAEMON_UTIL, "stop-master"])
3022 try:
3023 # Stop daemons on all nodes
3024 for node_name in self.online_nodes:
3025 self.feedback_fn("Stopping daemons on %s" % node_name)
3026 self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop-all"])
3027 # Starting any daemons listed as exception
3028 for daemon in self.exclude_daemons:
3029 if (daemon in constants.DAEMONS_MASTER and
3030 node_name != self.master_node):
3031 continue
3032 self.feedback_fn("Starting daemon '%s' on %s" % (daemon, node_name))
3033 self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start", daemon])
3034
3035 # All daemons are shut down now
3036 try:
3037 return fn(self, *args)
3038 except Exception, err:
3039 _, errmsg = FormatError(err)
3040 logging.exception("Caught exception")
3041 self.feedback_fn(errmsg)
3042 raise
3043 finally:
3044 # Start cluster again, master node last
3045 for node_name in self.nonmaster_nodes + [self.master_node]:
3046 # Stopping any daemons listed as exception.
3047 # This might look unnecessary, but it makes sure that daemon-util
3048 # starts all daemons in the right order.
3049 for daemon in self.exclude_daemons:
3050 if (daemon in constants.DAEMONS_MASTER and
3051 node_name != self.master_node):
3052 continue
3053 self.feedback_fn("Stopping daemon '%s' on %s" % (daemon, node_name))
3054 self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop", daemon])
3055 self.feedback_fn("Starting daemons on %s" % node_name)
3056 self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start-all"])
3057
3058 finally:
3059 # Resume watcher
3060 watcher_block.Close()
3061
3062
def RunWhileDaemonsStopped(feedback_fn, exclude_daemons, fn, *args):
  """Calls a function while all cluster daemons are stopped.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @type exclude_daemons: list of string
  @param exclude_daemons: list of daemons that are NOT stopped. If None,
      all daemons will be stopped.
  @type fn: callable
  @param fn: Function to be called when daemons are stopped
  @return: the return value of C{fn}

  """
  feedback_fn("Gathering cluster information")

  # Talking to the master daemon ensures this is running on the master node
  client = GetClient()

  (name_of_cluster, name_of_master) = \
    client.QueryConfigValues(["cluster_name", "master_node"])

  nodes_online = GetOnlineNodes([], cl=client)
  ports = GetNodesSshPorts(nodes_online, client)

  # Don't keep a reference to the client. The master daemon will go away.
  del client

  assert name_of_master in nodes_online

  helper = _RunWhileDaemonsStoppedHelper(feedback_fn, name_of_cluster,
                                         name_of_master, nodes_online,
                                         ports, exclude_daemons or [])
  return helper.Call(fn, *args)
3096
3097
def RunWhileClusterStopped(feedback_fn, fn, *args):
  """Calls a function while all cluster daemons are stopped.

  Convenience wrapper around L{RunWhileDaemonsStopped} that stops ALL
  daemons (no exclusions).

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @type fn: callable
  @param fn: Function to be called when daemons are stopped
  @return: the return value of C{fn}

  """
  # Propagate fn's result; the original discarded it although
  # RunWhileDaemonsStopped returns it
  return RunWhileDaemonsStopped(feedback_fn, None, fn, *args)
3108
3109
def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
                  units=None):
  """Prints a table with headers and different fields.

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the table
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data field
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string
  @rtype: list of strings
  @return: the formatted table lines (header first, if headers were given)

  @note: rows in C{data} are modified in place: every cell is replaced by
      its string representation (unit fields via L{utils.FormatUnit})

  """
  if units is None:
    if separator:
      units = "m"
    else:
      units = "h"

  if numfields is None:
    numfields = []
  if unitfields is None:
    unitfields = []

  # FieldSet supports wildcard patterns, not just exact field names
  numfields = utils.FieldSet(*numfields)  # pylint: disable=W0142
  unitfields = utils.FieldSet(*unitfields)  # pylint: disable=W0142

  format_fields = []
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # variable fields)
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      # "%*s" consumes two arguments per field: width and value
      format_fields.append("%*s")
    else:
      format_fields.append("%-*s")

  if separator is None:
    # mlens tracks the maximum width seen for each column
    mlens = [0 for name in fields]
    format_str = " ".join(format_fields)
  else:
    # Escape "%" in the separator so it survives the later "%"-formatting
    format_str = separator.replace("%", "%%").join(format_fields)

  # Stringify all cells (in place) and collect column widths
  for row in data:
    if row is None:
      continue
    for idx, val in enumerate(row):
      if unitfields.Matches(fields[idx]):
        try:
          val = int(val)
        except (TypeError, ValueError):
          # Non-numeric unit field values are passed through unformatted
          pass
        else:
          val = row[idx] = utils.FormatUnit(val, units)
      val = row[idx] = str(val)
      if separator is None:
        mlens[idx] = max(mlens[idx], len(val))

  result = []
  if headers:
    args = []
    for idx, name in enumerate(fields):
      hdr = headers[name]
      if separator is None:
        # Headers can also widen a column
        mlens[idx] = max(mlens[idx], len(hdr))
        args.append(mlens[idx])
      args.append(hdr)
    result.append(format_str % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)

    if fields and not numfields.Matches(fields[-1]):
      # Left-aligned last column needs no padding (avoids trailing spaces)
      mlens[-1] = 0

  for line in data:
    args = []
    if line is None:
      # Missing rows are rendered as all dashes
      line = ["-" for _ in fields]
    for idx in range(len(fields)):
      if separator is None:
        args.append(mlens[idx])
      args.append(line[idx])
    result.append(format_str % tuple(args))

  return result
3216
3217
3218 def _FormatBool(value):
3219 """Formats a boolean value as a string.
3220
3221 """
3222 if value:
3223 return "Y"
3224 return "N"
3225
3226
#: Default formatting for query results; (callback, align right).
#: L{constants.QFT_UNIT} is intentionally absent: its formatter depends on
#: the runtime-selected unit and is built in L{_GetColumnFormatter} instead.
_DEFAULT_FORMAT_QUERY = {
  constants.QFT_TEXT: (str, False),
  constants.QFT_BOOL: (_FormatBool, False),
  constants.QFT_NUMBER: (str, True),
  constants.QFT_NUMBER_FLOAT: (str, True),
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
  constants.QFT_OTHER: (str, False),
  constants.QFT_UNKNOWN: (str, False),
  }
3237
3238
3239 def _GetColumnFormatter(fdef, override, unit):
3240 """Returns formatting function for a field.
3241
3242 @type fdef: L{objects.QueryFieldDefinition}
3243 @type override: dict
3244 @param override: Dictionary for overriding field formatting functions,
3245 indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
3246 @type unit: string
3247 @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
3248 @rtype: tuple; (callable, bool)
3249 @return: Returns the function to format a value (takes one parameter) and a
3250 boolean for aligning the value on the right-hand side
3251
3252 """
3253 fmt = override.get(fdef.name, None)
3254 if fmt is not None:
3255 return fmt
3256
3257 assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY
3258
3259 if fdef.kind == constants.QFT_UNIT:
3260 # Can't keep this information in the static dictionary
3261 return (lambda value: utils.FormatUnit(value, unit), True)
3262
3263 fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
3264 if fmt is not None:
3265 return fmt
3266
3267 raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
3268
3269
class _QueryColumnFormatter(object):
  """Callable class for formatting fields of a query.

  """
  def __init__(self, fn, status_fn, verbose):
    """Initializes this class.

    @type fn: callable
    @param fn: Formatting function
    @type status_fn: callable
    @param status_fn: Function to report fields' status
    @type verbose: boolean
    @param verbose: whether to use verbose field descriptions or not

    """
    self._fn = fn
    self._status_fn = status_fn
    self._verbose = verbose

  def __call__(self, data):
    """Returns a field's string representation.

    """
    (status, value) = data

    # Report every status, normal or not
    self._status_fn(status)

    if status != constants.RS_NORMAL:
      # Abnormal results carry no value; render the status instead
      assert value is None, \
        "Found value %r for abnormal status %s" % (value, status)
      return FormatResultError(status, self._verbose)

    return self._fn(value)
3305
3306
def FormatResultError(status, verbose):
  """Formats result status other than L{constants.RS_NORMAL}.

  @param status: The result status
  @type verbose: boolean
  @param verbose: Whether to return the verbose text
  @return: Text of result status

  """
  assert status != constants.RS_NORMAL, \
    "FormatResultError called with status equal to constants.RS_NORMAL"
  try:
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
  except KeyError:
    raise NotImplementedError("Unknown status %s" % status)
  else:
    return verbose_text if verbose else normal_text
3326
3327
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
                      header=False, verbose=False):
  """Formats data in L{objects.QueryResponse}.

  @type result: L{objects.QueryResponse}
  @param result: result of query operation
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
    see L{utils.text.FormatUnit}
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to output header row
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not
  @rtype: tuple; (one of L{QR_NORMAL}, L{QR_UNKNOWN} or L{QR_INCOMPLETE},
    list of formatted lines)

  """
  if unit is None:
    unit = "m" if separator else "h"

  if format_override is None:
    format_override = {}

  # Count how often each result status was seen while formatting
  counts = dict.fromkeys(constants.RS_ALL, 0)

  def _CountStatus(status):
    if status in counts:
      counts[status] += 1

  columns = []
  for fdef in result.fields:
    assert fdef.title and fdef.name
    (fmt, align_right) = _GetColumnFormatter(fdef, format_override, unit)
    formatter = _QueryColumnFormatter(fmt, _CountStatus, verbose)
    columns.append(TableColumn(fdef.title, formatter, align_right))

  table = FormatTable(result.data, columns, header, separator)

  # Collect statistics
  assert len(counts) == len(constants.RS_ALL)
  assert compat.all(count >= 0 for count in counts.values())

  # Determine overall status. If there was no data, unknown fields must be
  # detected via the field definitions.
  if (counts[constants.RS_UNKNOWN] or
      (not result.data and _GetUnknownFields(result.fields))):
    status = QR_UNKNOWN
  elif compat.any(count > 0 for key, count in counts.items()
                  if key != constants.RS_NORMAL):
    status = QR_INCOMPLETE
  else:
    status = QR_NORMAL

  return (status, table)
3390
3391
def _GetUnknownFields(fdefs):
  """Returns list of unknown fields included in C{fdefs}.

  @type fdefs: list of L{objects.QueryFieldDefinition}
  @rtype: list

  """
  return [field_def for field_def in fdefs
          if field_def.kind == constants.QFT_UNKNOWN]
3400
3401
def _WarnUnknownFields(fdefs):
  """Prints a warning to stderr if a query included unknown fields.

  @type fdefs: list of L{objects.QueryFieldDefinition}
  @rtype: bool
  @return: whether any unknown field was included in the query

  """
  unknown = _GetUnknownFields(fdefs)
  if not unknown:
    return False

  ToStderr("Warning: Queried for unknown fields %s",
           utils.CommaJoin(fdef.name for fdef in unknown))
  return True
3415
3416
def GenericList(resource, fields, names, unit, separator, header, cl=None,
                format_override=None, verbose=False, force_filter=False,
                namefield=None, qfilter=None, isnumeric=False):
  """Generic implementation for listing all items of a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type names: list of strings
  @param names: Names of items to query for
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
    None for automatic choice (human-readable for non-separator usage,
    otherwise megabytes); this is a one-letter string
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @type force_filter: bool
  @param force_filter: Whether to always treat names as filter
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not
  @type namefield: string
  @param namefield: Name of field to use for simple filters (see
    L{qlang.MakeFilter} for details)
  @type qfilter: list or None
  @param qfilter: Query filter (in addition to names)
  @type isnumeric: bool
  @param isnumeric: Whether the namefield's type is numeric, and therefore
    any simple filters built by namefield should use integer values to
    reflect that
  @rtype: int
  @return: An exit code from L{constants}

  """
  if not names:
    names = None

  name_filter = qlang.MakeFilter(names, force_filter, namefield=namefield,
                                 isnumeric=isnumeric)

  # Combine the name-based filter with any explicitly supplied one
  if qfilter is None:
    qfilter = name_filter
  elif name_filter is not None:
    qfilter = [qlang.OP_AND, name_filter, qfilter]

  if cl is None:
    cl = GetClient()

  response = cl.Query(resource, fields, qfilter)

  found_unknown = _WarnUnknownFields(response.fields)

  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
                                     header=header,
                                     format_override=format_override,
                                     verbose=verbose)

  for line in data:
    ToStdout(line)

  assert ((found_unknown and status == QR_UNKNOWN) or
          (not found_unknown and status != QR_UNKNOWN))

  if status == QR_UNKNOWN:
    return constants.EXIT_UNKNOWN_FIELD

  # TODO: Should the list command fail if not all data could be collected?
  return constants.EXIT_SUCCESS
3487
3488
def _FieldDescValues(fdef):
  """Helper function for L{GenericListFields} to get query field description.

  @type fdef: L{objects.QueryFieldDefinition}
  @rtype: list
  @return: name, human-readable type name, title and documentation

  """
  # Fall back to the raw kind if no friendly name is known
  kind_name = _QFT_NAMES.get(fdef.kind, fdef.kind)
  return [fdef.name, kind_name, fdef.title, fdef.doc]
3502
3503
def GenericListFields(resource, fields, separator, header, cl=None):
  """Generic implementation for listing fields for a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @rtype: int
  @return: An exit code from L{constants}

  """
  if cl is None:
    cl = GetClient()

  if not fields:
    fields = None

  response = cl.QueryFields(resource, fields)

  found_unknown = _WarnUnknownFields(response.fields)

  columns = [TableColumn(title, str, False)
             for title in ("Name", "Type", "Title", "Description")]

  rows = [_FieldDescValues(fdef) for fdef in response.fields]

  for line in FormatTable(rows, columns, header, separator):
    ToStdout(line)

  if found_unknown:
    return constants.EXIT_UNKNOWN_FIELD

  return constants.EXIT_SUCCESS
3542
3543
class TableColumn(object):
  """Describes a column for L{FormatTable}.

  The attribute names are part of the public interface and are read
  directly by L{FormatTable}.

  """
  def __init__(self, title, fn, align_right):
    """Initializes this class.

    @type title: string
    @param title: Column title
    @type fn: callable
    @param fn: Formatting function
    @type align_right: bool
    @param align_right: Whether to align values on the right-hand side

    """
    self.align_right = align_right
    self.format = fn
    self.title = title
3562
3563
3564 def _GetColFormatString(width, align_right):
3565 """Returns the format string for a field.
3566
3567 """
3568 if align_right:
3569 sign = ""
3570 else:
3571 sign = "-"
3572
3573 return "%%%s%ss" % (sign, width)
3574
3575
def FormatTable(rows, columns, header, separator):
  """Formats data as a table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @type header: bool
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns; if None, columns are
    padded to equal width instead
  @rtype: list of strings

  """
  if header:
    data = [[col.title for col in columns]]
    widths = [len(col.title) for col in columns]
  else:
    data = []
    widths = [0] * len(columns)

  # Format all cells; when padding, also track each column's maximum width
  for row in rows:
    assert len(row) == len(columns)

    cells = [col.format(value) for (value, col) in zip(row, columns)]

    if separator is None:
      widths = [max(width, len(cell))
                for (width, cell) in zip(widths, cells)]

    data.append(cells)

  if separator is not None:
    # No padding needed when an explicit separator is used
    return [separator.join(cells) for cells in data]

  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line
    widths[-1] = 0

  # Build format string
  fmt = " ".join(_GetColFormatString(width, col.align_right)
                 for (col, width) in zip(columns, widths))

  return [fmt % tuple(cells) for cells in data]
3623
3624
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds
  @rtype: string
  @return: a string with the formatted timestamp, or "?" if the input is
    not a two-element tuple or list

  """
  if isinstance(ts, (tuple, list)) and len(ts) == 2:
    (sec, usecs) = ts
    return utils.FormatTime(sec, usecs=usecs)

  return "?"
3640
3641
def ParseTimespec(value):
  """Parse a time specification.

  The following suffixed will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: day
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  @raise errors.OpPrereqError: if the specification is empty or invalid
  @rtype: int
  @return: the number of seconds

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed",
                               errors.ECODE_INVAL)

  suffix_map = {
    "s": 1,
    "m": 60,
    "h": 3600,
    "d": 86400,
    "w": 604800,
    }

  multiplier = suffix_map.get(value[-1])

  if multiplier is None:
    # No recognized suffix; interpret the whole string as seconds
    try:
      return int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)

  value = value[:-1]
  if not value:  # no data left after stripping the suffix
    raise errors.OpPrereqError("Invalid time specification (only"
                               " suffix passed)", errors.ECODE_INVAL)
  try:
    return int(value) * multiplier
  except (TypeError, ValueError):
    raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                               errors.ECODE_INVAL)
3685
3686
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False, nodegroup=None):
  """Returns the names of online nodes.

  This function will also log a warning on stderr with the names of
  the online nodes.

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
      names, useful for doing network traffic over the replication interface
      (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
      (useful in coordination with secondary_ips where we cannot check our
      node name against the list)
  @type nodegroup: string
  @param nodegroup: If set, only return nodes in this node group

  """
  if cl is None:
    cl = GetClient()

  # Build up the individual filter conditions
  conditions = []

  if nodes:
    conditions.append(qlang.MakeSimpleFilter("name", nodes))

  if nodegroup is not None:
    conditions.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
                       [qlang.OP_EQUAL, "group.uuid", nodegroup]])

  if filter_master:
    conditions.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])

  # Combine conditions into a single filter expression
  if not conditions:
    final_filter = None
  elif len(conditions) == 1:
    final_filter = conditions[0]
  else:
    final_filter = [qlang.OP_AND] + conditions

  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"],
                    final_filter)

  # Each row contains (status, value) pairs for the queried fields
  def _IsOffline(row):
    (_, (_, offline), _) = row
    return offline

  def _GetName(row):
    ((_, name), _, _) = row
    return name

  def _GetSip(row):
    (_, _, (_, sip)) = row
    return sip

  (offline, online) = compat.partition(result.data, _IsOffline)

  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" %
             utils.CommaJoin(map(_GetName, offline)))

  if secondary_ips:
    extract = _GetSip
  else:
    extract = _GetName

  return map(extract, online)
3763
3764
def GetNodesSshPorts(nodes, cl):
  """Retrieves SSH ports of given nodes.

  @param nodes: the names of nodes
  @type nodes: a list of strings
  @param cl: a client to use for the query
  @type cl: L{ganeti.luxi.Client}
  @return: the SSH port of each node, in the same order as C{nodes}
  @rtype: a list

  """
  rows = cl.QueryNodes(names=nodes,
                       fields=["ndp/ssh_port"],
                       use_locking=False)
  # Each row holds a single queried field; extract it
  return [row[0] for row in rows]
3779
3780