Implement predictive queue cluster parameter
[ganeti-github.git] / lib / objects.py
1 #
2 #
3
4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc.
5 # All rights reserved.
6 #
7 # Redistribution and use in source and binary forms, with or without
8 # modification, are permitted provided that the following conditions are
9 # met:
10 #
11 # 1. Redistributions of source code must retain the above copyright notice,
12 # this list of conditions and the following disclaimer.
13 #
14 # 2. Redistributions in binary form must reproduce the above copyright
15 # notice, this list of conditions and the following disclaimer in the
16 # documentation and/or other materials provided with the distribution.
17 #
18 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
19 # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
20 # TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
21 # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
22 # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
23 # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
24 # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
25 # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
26 # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
27 # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28 # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
30
31 """Transportable objects for Ganeti.
32
33 This module provides small, mostly data-only objects which are safe to
34 pass to and from external parties.
35
36 """
37
38 # pylint: disable=E0203,E0237,W0201,R0902
39
40 # E0203: Access to member %r before its definition, since we use
41 # objects.py which doesn't explicitly initialise its members
42
43 # E0237: Assigning to attribute not defined in class slots. pylint doesn't
44 # appear to notice many of the slots defined in __slots__ for several objects.
45
46 # W0201: Attribute '%s' defined outside __init__
47
48 # R0902: Allow instances of these objects to have more than 20 attributes
49
50 import ConfigParser
51 import re
52 import copy
53 import logging
54 import time
55 from cStringIO import StringIO
56 from socket import AF_INET
57
58 from ganeti import errors
59 from ganeti import constants
60 from ganeti import netutils
61 from ganeti import outils
62 from ganeti import utils
63 from ganeti import serializer
64
65
66 __all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
67 "OS", "Node", "NodeGroup", "Cluster", "FillDict", "Network",
68 "Filter", "Maintenance"]
69
70 _TIMESTAMPS = ["ctime", "mtime"]
71 _UUID = ["uuid"]
72
73
74 def FillDict(defaults_dict, custom_dict, skip_keys=None):
75   """Basic function to apply settings on top of a default dict.
76
77 @type defaults_dict: dict
78 @param defaults_dict: dictionary holding the default values
79 @type custom_dict: dict
80   @param custom_dict: dictionary holding customized values
81 @type skip_keys: list
82 @param skip_keys: which keys not to fill
83 @rtype: dict
84 @return: dict with the 'full' values
85
86 """
87 ret_dict = copy.deepcopy(defaults_dict)
88 ret_dict.update(custom_dict)
89 if skip_keys:
90 for k in skip_keys:
91 if k in ret_dict:
92 del ret_dict[k]
93 return ret_dict
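
# Illustrative example of FillDict (the dictionaries below are made up for
# demonstration and are not real Ganeti parameter sets):
#
#   >>> FillDict({"mem": 128, "vcpus": 1, "auto_balance": True},
#   ...          {"mem": 512}, skip_keys=["auto_balance"])
#   # -> {"mem": 512, "vcpus": 1}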
94
95
96 def FillIPolicy(default_ipolicy, custom_ipolicy):
97 """Fills an instance policy with defaults.
98
99 """
100 assert frozenset(default_ipolicy.keys()) == constants.IPOLICY_ALL_KEYS
101 ret_dict = copy.deepcopy(custom_ipolicy)
102 for key in default_ipolicy:
103 if key not in ret_dict:
104 ret_dict[key] = copy.deepcopy(default_ipolicy[key])
105 elif key == constants.ISPECS_STD:
106 ret_dict[key] = FillDict(default_ipolicy[key], ret_dict[key])
107 return ret_dict
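
# Sketch of the special ISPECS_STD handling (values are made up): keys
# missing from the custom policy are deep-copied from the defaults, while
# the "std" sub-dict is itself merged key by key via FillDict, e.g.
#
#   defaults["std"] = {"cpu-count": 1, "memory-size": 128}
#   custom["std"]   = {"memory-size": 512}
#   result["std"]   = {"cpu-count": 1, "memory-size": 512}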
108
109
110 def FillDiskParams(default_dparams, custom_dparams, skip_keys=None):
111 """Fills the disk parameter defaults.
112
113 @see: L{FillDict} for parameters and return value
114
115 """
116 return dict((dt, FillDict(default_dparams.get(dt, {}),
117 custom_dparams.get(dt, {}),
118 skip_keys=skip_keys))
119 for dt in constants.DISK_TEMPLATES)
120
121
122 def UpgradeGroupedParams(target, defaults):
123 """Update all groups for the target parameter.
124
125 @type target: dict of dicts
126 @param target: {group: {parameter: value}}
127 @type defaults: dict
128 @param defaults: default parameter values
129
130 """
131 if target is None:
132 target = {constants.PP_DEFAULT: defaults}
133 else:
134 for group in target:
135 target[group] = FillDict(defaults, target[group])
136 return target
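
# Illustrative example (parameter values are made up); a missing target is
# replaced by a single PP_DEFAULT ("default") group holding the defaults:
#
#   >>> UpgradeGroupedParams(None, {"mode": "bridged", "link": "br0"})
#   # -> {"default": {"mode": "bridged", "link": "br0"}}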
137
138
139 def UpgradeBeParams(target):
140 """Update the be parameters dict to the new format.
141
142 @type target: dict
143 @param target: "be" parameters dict
144
145 """
146 if constants.BE_MEMORY in target:
147 memory = target[constants.BE_MEMORY]
148 target[constants.BE_MAXMEM] = memory
149 target[constants.BE_MINMEM] = memory
150 del target[constants.BE_MEMORY]
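
# Illustrative example (values are made up): the legacy "memory" value is
# split into "maxmem"/"minmem" in place.
#
#   >>> be = {"memory": 256, "vcpus": 2}
#   >>> UpgradeBeParams(be)
#   >>> be
#   # -> {"maxmem": 256, "minmem": 256, "vcpus": 2}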
151
152
153 def UpgradeDiskParams(diskparams):
154 """Upgrade the disk parameters.
155
156 @type diskparams: dict
157 @param diskparams: disk parameters to upgrade
158 @rtype: dict
159 @return: the upgraded disk parameters dict
160
161 """
162 if not diskparams:
163 result = {}
164 else:
165 result = FillDiskParams(constants.DISK_DT_DEFAULTS, diskparams)
166
167 return result
168
169
170 def UpgradeNDParams(ndparams):
171 """Upgrade ndparams structure.
172
173 @type ndparams: dict
174   @param ndparams: node parameters to upgrade
175 @rtype: dict
176 @return: the upgraded node parameters dict
177
178 """
179 if ndparams is None:
180 ndparams = {}
181
182 if (constants.ND_OOB_PROGRAM in ndparams and
183 ndparams[constants.ND_OOB_PROGRAM] is None):
184 # will be reset by the line below
185 del ndparams[constants.ND_OOB_PROGRAM]
186 return FillDict(constants.NDC_DEFAULTS, ndparams)
187
188
189 def MakeEmptyIPolicy():
190 """Create empty IPolicy dictionary.
191
192 """
193 return {}
194
195
196 class ConfigObject(outils.ValidatedSlots):
197 """A generic config object.
198
199 It has the following properties:
200
201 - provides somewhat safe recursive unpickling and pickling for its classes
202 - unset attributes which are defined in slots are always returned
203 as None instead of raising an error
204
205 Classes derived from this must always declare __slots__ (we use many
206 config objects and the memory reduction is useful)
207
208 """
209 __slots__ = []
210
211 def __getattr__(self, name):
212 if name not in self.GetAllSlots():
213 raise AttributeError("Invalid object attribute %s.%s" %
214 (type(self).__name__, name))
215 return None
216
217 def __setstate__(self, state):
218 slots = self.GetAllSlots()
219 for name in state:
220 if name in slots:
221 setattr(self, name, state[name])
222
223 def Validate(self):
224 """Validates the slots.
225
226 This method returns L{None} if the validation succeeds, or raises
227 an exception otherwise.
228
229 This method must be implemented by the child classes.
230
231 @rtype: NoneType
232 @return: L{None}, if the validation succeeds
233
234 @raise Exception: validation fails
235
236 """
237
238 def ToDict(self, _with_private=False):
239 """Convert to a dict holding only standard python types.
240
241 The generic routine just dumps all of this object's attributes in
242 a dict. It does not work if the class has children who are
243 ConfigObjects themselves (e.g. the nics list in an Instance), in
244 which case the object should subclass the function in order to
245 make sure all objects returned are only standard python types.
246
247 Private fields can be included or not with the _with_private switch.
248     The actual implementation of this switch is left for those subclasses
249 with private fields to implement.
250
251 @type _with_private: bool
252 @param _with_private: if True, the object will leak its private fields in
253 the dictionary representation. If False, the values
254 will be replaced with None.
255
256 """
257 result = {}
258 for name in self.GetAllSlots():
259 value = getattr(self, name, None)
260 if value is not None:
261 result[name] = value
262 return result
263
264 __getstate__ = ToDict
265
266 @classmethod
267 def FromDict(cls, val):
268 """Create an object from a dictionary.
269
270 This generic routine takes a dict, instantiates a new instance of
271 the given class, and sets attributes based on the dict content.
272
273 As for `ToDict`, this does not work if the class has children
274 who are ConfigObjects themselves (e.g. the nics list in an
275 Instance), in which case the object should subclass the function
276 and alter the objects.
277
278 """
279 if not isinstance(val, dict):
280 raise errors.ConfigurationError("Invalid object passed to FromDict:"
281 " expected dict, got %s" % type(val))
282 val_str = dict([(str(k), v) for k, v in val.iteritems()])
283 obj = cls(**val_str)
284 return obj
285
286 def Copy(self):
287 """Makes a deep copy of the current object and its children.
288
289 """
290 dict_form = self.ToDict()
291 clone_obj = self.__class__.FromDict(dict_form)
292 return clone_obj
293
294 def __repr__(self):
295 """Implement __repr__ for ConfigObjects."""
296 return repr(self.ToDict())
297
298 def __eq__(self, other):
299 """Implement __eq__ for ConfigObjects."""
300 return isinstance(other, self.__class__) and self.ToDict() == other.ToDict()
301
302 def UpgradeConfig(self):
303 """Fill defaults for missing configuration values.
304
305 This method will be called at configuration load time, and its
306 implementation will be object dependent.
307
308 """
309 pass
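
# Round-trip sketch for ConfigObject (the subclass below is hypothetical,
# for illustration only):
#
#   class _Example(ConfigObject):
#     __slots__ = ["alpha", "beta"]
#
#   >>> _Example.FromDict({"alpha": 1}).ToDict()
#   # -> {"alpha": 1}; the unset "beta" slot reads as None and is omitted
#   # from the dict form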
310
311
312 class TaggableObject(ConfigObject):
313   """A generic class supporting tags.
314
315 """
316 __slots__ = ["tags"]
317 VALID_TAG_RE = re.compile(r"^[\w.+*/:@-]+$")
318
319 @classmethod
320 def ValidateTag(cls, tag):
321 """Check if a tag is valid.
322
323 If the tag is invalid, an errors.TagError will be raised. The
324 function has no return value.
325
326 """
327 if not isinstance(tag, basestring):
328 raise errors.TagError("Invalid tag type (not a string)")
329 if len(tag) > constants.MAX_TAG_LEN:
330 raise errors.TagError("Tag too long (>%d characters)" %
331 constants.MAX_TAG_LEN)
332 if not tag:
333 raise errors.TagError("Tags cannot be empty")
334 if not cls.VALID_TAG_RE.match(tag):
335 raise errors.TagError("Tag contains invalid characters")
336
337 def GetTags(self):
338 """Return the tags list.
339
340 """
341 tags = getattr(self, "tags", None)
342 if tags is None:
343 tags = self.tags = set()
344 return tags
345
346 def AddTag(self, tag):
347 """Add a new tag.
348
349 """
350 self.ValidateTag(tag)
351 tags = self.GetTags()
352 if len(tags) >= constants.MAX_TAGS_PER_OBJ:
353 raise errors.TagError("Too many tags")
354 self.GetTags().add(tag)
355
356 def RemoveTag(self, tag):
357 """Remove a tag.
358
359 """
360 self.ValidateTag(tag)
361 tags = self.GetTags()
362 try:
363 tags.remove(tag)
364 except KeyError:
365 raise errors.TagError("Tag not found")
366
367 def ToDict(self, _with_private=False):
368 """Taggable-object-specific conversion to standard python types.
369
370 This replaces the tags set with a list.
371
372 """
373 bo = super(TaggableObject, self).ToDict(_with_private=_with_private)
374
375 tags = bo.get("tags", None)
376 if isinstance(tags, set):
377 bo["tags"] = list(tags)
378 return bo
379
380 @classmethod
381 def FromDict(cls, val):
382 """Custom function for instances.
383
384 """
385 obj = super(TaggableObject, cls).FromDict(val)
386 if hasattr(obj, "tags") and isinstance(obj.tags, list):
387 obj.tags = set(obj.tags)
388 return obj
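
# Tag-handling sketch (illustrative; any TaggableObject subclass, e.g. Node
# or Instance, behaves the same way; the node name is made up):
#
#   >>> node = Node(name="node1.example.com")
#   >>> node.AddTag("rack:r1")
#   >>> node.GetTags()
#   # -> set(["rack:r1"]); invalid tags raise errors.TagError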
389
390
391 class MasterNetworkParameters(ConfigObject):
392 """Network configuration parameters for the master
393
394   @ivar uuid: master node's UUID
395 @ivar ip: master IP
396 @ivar netmask: master netmask
397 @ivar netdev: master network device
398 @ivar ip_family: master IP family
399
400 """
401 __slots__ = [
402 "uuid",
403 "ip",
404 "netmask",
405 "netdev",
406 "ip_family",
407 ]
408
409
410 class ConfigData(ConfigObject):
411 """Top-level config object."""
412 __slots__ = [
413 "version",
414 "cluster",
415 "nodes",
416 "nodegroups",
417 "instances",
418 "networks",
419 "disks",
420 "filters",
421 "maintenance",
422 "serial_no",
423 ] + _TIMESTAMPS
424
425 def ToDict(self, _with_private=False):
426 """Custom function for top-level config data.
427
428 This just replaces the list of nodes, instances, nodegroups,
429 networks, disks and the cluster with standard python types.
430
431 """
432 mydict = super(ConfigData, self).ToDict(_with_private=_with_private)
433 mydict["cluster"] = mydict["cluster"].ToDict()
434 mydict["maintenance"] = mydict["maintenance"].ToDict()
435 for key in ("nodes", "instances", "nodegroups", "networks", "disks",
436 "filters"):
437 mydict[key] = outils.ContainerToDicts(mydict[key])
438
439 return mydict
440
441 @classmethod
442 def FromDict(cls, val):
443 """Custom function for top-level config data
444
445 """
446 obj = super(ConfigData, cls).FromDict(val)
447 obj.cluster = Cluster.FromDict(obj.cluster)
448 obj.nodes = outils.ContainerFromDicts(obj.nodes, dict, Node)
449 obj.instances = \
450 outils.ContainerFromDicts(obj.instances, dict, Instance)
451 obj.nodegroups = \
452 outils.ContainerFromDicts(obj.nodegroups, dict, NodeGroup)
453 obj.networks = outils.ContainerFromDicts(obj.networks, dict, Network)
454 obj.disks = outils.ContainerFromDicts(obj.disks, dict, Disk)
455 obj.filters = outils.ContainerFromDicts(obj.filters, dict, Filter)
456 obj.maintenance = Maintenance.FromDict(obj.maintenance)
457 return obj
458
459 def DisksOfType(self, dev_type):
460     """Return all disks of the given type in the configuration.
461
462 @type dev_type: L{constants.DTS_BLOCK}
463 @param dev_type: the type to look for
464 @rtype: list of disks
465 @return: all disks of the dev_type
466
467 """
468
469 return [disk for disk in self.disks.values()
470 if disk.IsBasedOnDiskType(dev_type)]
471
472 def UpgradeConfig(self):
473 """Fill defaults for missing configuration values.
474
475 """
476 self.cluster.UpgradeConfig()
477 for node in self.nodes.values():
478 node.UpgradeConfig()
479 for instance in self.instances.values():
480 instance.UpgradeConfig()
481 self._UpgradeEnabledDiskTemplates()
482 if self.nodegroups is None:
483 self.nodegroups = {}
484 for nodegroup in self.nodegroups.values():
485 nodegroup.UpgradeConfig()
486 InstancePolicy.UpgradeDiskTemplates(
487 nodegroup.ipolicy, self.cluster.enabled_disk_templates)
488 if self.cluster.drbd_usermode_helper is None:
489 if self.cluster.IsDiskTemplateEnabled(constants.DT_DRBD8):
490 self.cluster.drbd_usermode_helper = constants.DEFAULT_DRBD_HELPER
491 if self.networks is None:
492 self.networks = {}
493 for network in self.networks.values():
494 network.UpgradeConfig()
495 for disk in self.disks.values():
496 disk.UpgradeConfig()
497 if self.filters is None:
498 self.filters = {}
499 if self.maintenance is None:
500 self.maintenance = Maintenance.FromDict({})
501 self.maintenance.UpgradeConfig()
502
503 def _UpgradeEnabledDiskTemplates(self):
504 """Upgrade the cluster's enabled disk templates by inspecting the currently
505 enabled and/or used disk templates.
506
507 """
508 if not self.cluster.enabled_disk_templates:
509 template_set = \
510 set([d.dev_type for d in self.disks.values()])
511 if any(not inst.disks for inst in self.instances.values()):
512 template_set.add(constants.DT_DISKLESS)
513 # Add drbd and plain, if lvm is enabled (by specifying a volume group)
514 if self.cluster.volume_group_name:
515 template_set.add(constants.DT_DRBD8)
516 template_set.add(constants.DT_PLAIN)
517 # Set enabled_disk_templates to the inferred disk templates. Order them
518 # according to a preference list that is based on Ganeti's history of
519 # supported disk templates.
520 self.cluster.enabled_disk_templates = []
521 for preferred_template in constants.DISK_TEMPLATE_PREFERENCE:
522 if preferred_template in template_set:
523 self.cluster.enabled_disk_templates.append(preferred_template)
524 template_set.remove(preferred_template)
525 self.cluster.enabled_disk_templates.extend(list(template_set))
526 InstancePolicy.UpgradeDiskTemplates(
527 self.cluster.ipolicy, self.cluster.enabled_disk_templates)
528
529
530 class NIC(ConfigObject):
531 """Config object representing a network card."""
532 __slots__ = ["name", "mac", "ip", "network",
533 "nicparams", "netinfo", "pci", "hvinfo"] + _UUID
534
535 @classmethod
536 def CheckParameterSyntax(cls, nicparams):
537 """Check the given parameters for validity.
538
539 @type nicparams: dict
540 @param nicparams: dictionary with parameter names/value
541 @raise errors.ConfigurationError: when a parameter is not valid
542
543 """
544 mode = nicparams[constants.NIC_MODE]
545 if (mode not in constants.NIC_VALID_MODES and
546 mode != constants.VALUE_AUTO):
547 raise errors.ConfigurationError("Invalid NIC mode '%s'" % mode)
548
549 if (mode == constants.NIC_MODE_BRIDGED and
550 not nicparams[constants.NIC_LINK]):
551 raise errors.ConfigurationError("Missing bridged NIC link")
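
# Illustrative checks (the link value "br0" is an assumption, not a
# default):
#
#   >>> NIC.CheckParameterSyntax({"mode": "bridged", "link": "br0"})  # passes
#   >>> NIC.CheckParameterSyntax({"mode": "bridged", "link": ""})
#   # raises errors.ConfigurationError("Missing bridged NIC link")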
552
553
554 class Filter(ConfigObject):
555 """Config object representing a filter rule."""
556 __slots__ = ["watermark", "priority",
557 "predicates", "action", "reason_trail"] + _UUID
558
559
560 class Maintenance(ConfigObject):
561 """Config object representing the state of the maintenance daemon"""
562 __slots__ = ["roundDelay", "jobs", "evacuated", "balance", "balanceThreshold",
563 "incidents", "serial_no"] + _TIMESTAMPS
564
565 def UpgradeConfig(self):
566 if self.serial_no is None:
567 self.serial_no = 1
568 if self.mtime is None:
569 self.mtime = time.time()
570 if self.ctime is None:
571 self.ctime = time.time()
572
573
574 class Disk(ConfigObject):
575 """Config object representing a block device."""
576 __slots__ = [
577 "forthcoming",
578 "name",
579 "dev_type",
580 "logical_id",
581 "children",
582 "nodes",
583 "iv_name",
584 "size",
585 "mode",
586 "params",
587 "spindles",
588 "pci",
589 "hvinfo",
590 "serial_no",
591     # dynamic_params is special. It depends on the node this disk object
592     # is sent to, and should not be persisted.
593 "dynamic_params"
594 ] + _UUID + _TIMESTAMPS
595
596 def _ComputeAllNodes(self):
597 """Compute the list of all nodes covered by a device and its children."""
598 def _Helper(nodes, device):
599 """Recursively compute nodes given a top device."""
600 if device.dev_type in constants.DTS_DRBD:
601 nodes.extend(device.logical_id[:2])
602 if device.children:
603 for child in device.children:
604 _Helper(nodes, child)
605
606 all_nodes = list()
607 _Helper(all_nodes, self)
608 return tuple(set(all_nodes))
609
610 all_nodes = property(_ComputeAllNodes, None, None,
611 "List of names of all the nodes of a disk")
612
613 def CreateOnSecondary(self):
614 """Test if this device needs to be created on a secondary node."""
615 return self.dev_type in (constants.DT_DRBD8, constants.DT_PLAIN)
616
617 def AssembleOnSecondary(self):
618 """Test if this device needs to be assembled on a secondary node."""
619 return self.dev_type in (constants.DT_DRBD8, constants.DT_PLAIN)
620
621 def OpenOnSecondary(self):
622 """Test if this device needs to be opened on a secondary node."""
623 return self.dev_type in (constants.DT_PLAIN,)
624
625 def SupportsSnapshots(self):
626 """Test if this device supports snapshots."""
627 return self.dev_type in constants.DTS_SNAPSHOT_CAPABLE
628
629 def StaticDevPath(self):
630 """Return the device path if this device type has a static one.
631
632     Some devices (LVM for example) always live at the same /dev/ path,
633 irrespective of their status. For such devices, we return this
634 path, for others we return None.
635
636 @warning: The path returned is not a normalized pathname; callers
637 should check that it is a valid path.
638
639 """
640 if self.dev_type == constants.DT_PLAIN:
641 return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
642 elif self.dev_type == constants.DT_BLOCK:
643 return self.logical_id[1]
644 elif self.dev_type == constants.DT_RBD:
645 return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
646 return None
647
648 def ChildrenNeeded(self):
649 """Compute the needed number of children for activation.
650
651     This method will return either -1 (all children) or a non-negative
652 number denoting the minimum number of children needed for
653 activation (only mirrored devices will usually return >=0).
654
655 Currently, only DRBD8 supports diskless activation (therefore we
656     return 0), for all others we keep the previous semantics and return
657 -1.
658
659 """
660 if self.dev_type == constants.DT_DRBD8:
661 return 0
662 return -1
663
664 def IsBasedOnDiskType(self, dev_type):
665 """Check if the disk or its children are based on the given type.
666
667 @type dev_type: L{constants.DTS_BLOCK}
668 @param dev_type: the type to look for
669 @rtype: boolean
670 @return: boolean indicating if a device of the given type was found or not
671
672 """
673 if self.children:
674 for child in self.children:
675 if child.IsBasedOnDiskType(dev_type):
676 return True
677 return self.dev_type == dev_type
678
679 def GetNodes(self, node_uuid):
680 """This function returns the nodes this device lives on.
681
682     Given the node on which the parent of the device lives (or, in
683     case of a top-level device, the primary node of the device's
684     instance), this function will return a list of nodes on which this
685     device needs to (or can) be assembled.
686
687 """
688 if self.dev_type in [constants.DT_PLAIN, constants.DT_FILE,
689 constants.DT_BLOCK, constants.DT_RBD,
690 constants.DT_EXT, constants.DT_SHARED_FILE,
691 constants.DT_GLUSTER]:
692 result = [node_uuid]
693 elif self.dev_type in constants.DTS_DRBD:
694 result = [self.logical_id[0], self.logical_id[1]]
695 if node_uuid not in result:
696 raise errors.ConfigurationError("DRBD device passed unknown node")
697 else:
698 raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
699 return result
700
701 def GetPrimaryNode(self, node_uuid):
702 """This function returns the primary node of the device.
703
704 If the device is not a DRBD device, we still return the node the device
705 lives on.
706
707 """
708 if self.dev_type in constants.DTS_DRBD:
709 return self.logical_id[0]
710 return node_uuid
711
712 def ComputeNodeTree(self, parent_node_uuid):
713 """Compute the node/disk tree for this disk and its children.
714
715 This method, given the node on which the parent disk lives, will
716 return the list of all (node UUID, disk) pairs which describe the disk
717 tree in the most compact way. For example, a drbd/lvm stack
718 will be returned as (primary_node, drbd) and (secondary_node, drbd)
719 which represents all the top-level devices on the nodes.
720
721 """
722 my_nodes = self.GetNodes(parent_node_uuid)
723 result = [(node, self) for node in my_nodes]
724 if not self.children:
725 # leaf device
726 return result
727 for node in my_nodes:
728 for child in self.children:
729 child_result = child.ComputeNodeTree(node)
730 if len(child_result) == 1:
731 # child (and all its descendants) is simple, doesn't split
732 # over multiple hosts, so we don't need to describe it, our
733 # own entry for this node describes it completely
734 continue
735 else:
736 # check if child nodes differ from my nodes; note that
737 # subdisk can differ from the child itself, and be instead
738 # one of its descendants
739 for subnode, subdisk in child_result:
740 if subnode not in my_nodes:
741 result.append((subnode, subdisk))
742 # otherwise child is under our own node, so we ignore this
743 # entry (but probably the other results in the list will
744 # be different)
745 return result
746
747 def ComputeGrowth(self, amount):
748 """Compute the per-VG growth requirements.
749
750 This only works for VG-based disks.
751
752 @type amount: integer
753 @param amount: the desired increase in (user-visible) disk space
754 @rtype: dict
755 @return: a dictionary of volume-groups and the required size
756
757 """
758 if self.dev_type == constants.DT_PLAIN:
759 return {self.logical_id[0]: amount}
760 elif self.dev_type == constants.DT_DRBD8:
761 if self.children:
762 return self.children[0].ComputeGrowth(amount)
763 else:
764 return {}
765 else:
766 # Other disk types do not require VG space
767 return {}
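
  # Illustrative example ("xenvg" is an assumed volume group name):
  #
  #   >>> lv = Disk(dev_type=constants.DT_PLAIN, size=2048,
  #   ...           logical_id=("xenvg", "disk0"))
  #   >>> lv.ComputeGrowth(1024)
  #   # -> {"xenvg": 1024}; non-VG-based disk types return {}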
768
769 def RecordGrow(self, amount):
770 """Update the size of this disk after growth.
771
772     This method recurses over the disk's children and updates their
773     size correspondingly. The method needs to be kept in sync with the
774 actual algorithms from bdev.
775
776 """
777 if self.dev_type in (constants.DT_PLAIN, constants.DT_FILE,
778 constants.DT_RBD, constants.DT_EXT,
779 constants.DT_SHARED_FILE, constants.DT_GLUSTER):
780 self.size += amount
781 elif self.dev_type == constants.DT_DRBD8:
782 if self.children:
783 self.children[0].RecordGrow(amount)
784 self.size += amount
785 else:
786 raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
787 " disk type %s" % self.dev_type)
788
789 def Update(self, size=None, mode=None, spindles=None):
790 """Apply changes to size, spindles and mode.
791
792 """
793 if self.dev_type == constants.DT_DRBD8:
794 if self.children:
795 self.children[0].Update(size=size, mode=mode)
796 else:
797 assert not self.children
798
799 if size is not None:
800 self.size = size
801 if mode is not None:
802 self.mode = mode
803 if spindles is not None:
804 self.spindles = spindles
805
806 def UnsetSize(self):
807 """Sets recursively the size to zero for the disk and its children.
808
809 """
810 if self.children:
811 for child in self.children:
812 child.UnsetSize()
813 self.size = 0
814
815 def UpdateDynamicDiskParams(self, target_node_uuid, nodes_ip):
816 """Updates the dynamic disk params for the given node.
817
818 This is mainly used for drbd, which needs ip/port configuration.
819
820 Arguments:
821 - target_node_uuid: the node UUID we wish to configure for
822     - nodes_ip: a mapping of node UUID to IP
823
824 The target_node must exist in nodes_ip, and should be one of the
825 nodes in the logical ID if this device is a DRBD device.
826
827 """
828 if self.children:
829 for child in self.children:
830 child.UpdateDynamicDiskParams(target_node_uuid, nodes_ip)
831
832 dyn_disk_params = {}
833 if self.logical_id is not None and self.dev_type in constants.DTS_DRBD:
834 pnode_uuid, snode_uuid, _, pminor, sminor, _ = self.logical_id
835 if target_node_uuid not in (pnode_uuid, snode_uuid):
836 # disk object is being sent to neither the primary nor the secondary
837 # node. reset the dynamic parameters, the target node is not
838 # supposed to use them.
839 self.dynamic_params = dyn_disk_params
840 return
841
842 pnode_ip = nodes_ip.get(pnode_uuid, None)
843 snode_ip = nodes_ip.get(snode_uuid, None)
844 if pnode_ip is None or snode_ip is None:
845 raise errors.ConfigurationError("Can't find primary or secondary node"
846 " for %s" % str(self))
847 if pnode_uuid == target_node_uuid:
848 dyn_disk_params[constants.DDP_LOCAL_IP] = pnode_ip
849 dyn_disk_params[constants.DDP_REMOTE_IP] = snode_ip
850 dyn_disk_params[constants.DDP_LOCAL_MINOR] = pminor
851 dyn_disk_params[constants.DDP_REMOTE_MINOR] = sminor
852 else: # it must be secondary, we tested above
853 dyn_disk_params[constants.DDP_LOCAL_IP] = snode_ip
854 dyn_disk_params[constants.DDP_REMOTE_IP] = pnode_ip
855 dyn_disk_params[constants.DDP_LOCAL_MINOR] = sminor
856 dyn_disk_params[constants.DDP_REMOTE_MINOR] = pminor
857
858 self.dynamic_params = dyn_disk_params
859
860 # pylint: disable=W0221
861 def ToDict(self, include_dynamic_params=False,
862 _with_private=False):
863 """Disk-specific conversion to standard python types.
864
865 This replaces the children lists of objects with lists of
866 standard python types.
867
868 """
869 bo = super(Disk, self).ToDict(_with_private=_with_private)
870 if not include_dynamic_params and "dynamic_params" in bo:
871 del bo["dynamic_params"]
872
873 if _with_private and "logical_id" in bo:
874 mutable_id = list(bo["logical_id"])
875 mutable_id[5] = mutable_id[5].Get()
876 bo["logical_id"] = tuple(mutable_id)
877
878 for attr in ("children",):
879 alist = bo.get(attr, None)
880 if alist:
881 bo[attr] = outils.ContainerToDicts(alist)
882 return bo
883
884 @classmethod
885 def FromDict(cls, val):
886 """Custom function for Disks
887
888 """
889 obj = super(Disk, cls).FromDict(val)
890 if obj.children:
891 obj.children = outils.ContainerFromDicts(obj.children, list, Disk)
892 if obj.logical_id and isinstance(obj.logical_id, list):
893 obj.logical_id = tuple(obj.logical_id)
894 if obj.dev_type in constants.DTS_DRBD:
895 # we need a tuple of length six here
896 if len(obj.logical_id) < 6:
897 obj.logical_id += (None,) * (6 - len(obj.logical_id))
898 # If we do have a tuple of length 6, make the last entry (secret key)
899 # private
900 elif (len(obj.logical_id) == 6 and
901 not isinstance(obj.logical_id[-1], serializer.Private)):
902 obj.logical_id = obj.logical_id[:-1] + \
903 (serializer.Private(obj.logical_id[-1]),)
904 return obj
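
  # Deserialisation sketch for a DRBD disk (the UUIDs, port, minors and
  # secret below are made up): a shorter logical_id is padded with None,
  # while a full 6-element one gets its secret wrapped in
  # serializer.Private.
  #
  #   >>> d = Disk.FromDict({
  #   ...   "dev_type": constants.DT_DRBD8,
  #   ...   "logical_id": ["uuid-a", "uuid-b", 11000, 0, 1, "secret"],
  #   ...   "size": 1024,
  #   ... })
  #   >>> isinstance(d.logical_id[-1], serializer.Private)
  #   True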
905
906 def __str__(self):
907 """Custom str() formatter for disks.
908
909 """
910 if self.dev_type == constants.DT_PLAIN:
911 val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
912 elif self.dev_type in constants.DTS_DRBD:
913 node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
914 val = "<DRBD8("
915
916 val += ("hosts=%s/%d-%s/%d, port=%s, " %
917 (node_a, minor_a, node_b, minor_b, port))
918 if self.children and self.children.count(None) == 0:
919 val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
920 else:
921 val += "no local storage"
922 else:
923 val = ("<Disk(type=%s, logical_id=%s, children=%s" %
924 (self.dev_type, self.logical_id, self.children))
925 if self.iv_name is None:
926 val += ", not visible"
927 else:
928 val += ", visible as /dev/%s" % self.iv_name
929 if self.spindles is not None:
930 val += ", spindles=%s" % self.spindles
931 if isinstance(self.size, int):
932 val += ", size=%dm)>" % self.size
933 else:
934 val += ", size='%s')>" % (self.size,)
935 return val
936
937 def Verify(self):
938 """Checks that this disk is correctly configured.
939
940 """
941 all_errors = []
942 if self.mode not in constants.DISK_ACCESS_SET:
943 all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
944 return all_errors
945
946 def UpgradeConfig(self):
947 """Fill defaults for missing configuration values.
948
949 """
950 if self.children:
951 for child in self.children:
952 child.UpgradeConfig()
953
954 # FIXME: Make this configurable in Ganeti 2.7
955 # Params should be an empty dict that gets filled any time needed
956 # In case of ext template we allow arbitrary params that should not
957     # be overridden during a config reload/upgrade.
958 if not self.params or not isinstance(self.params, dict):
959 self.params = {}
960
961 # add here config upgrade for this disk
962 if self.serial_no is None:
963 self.serial_no = 1
964 if self.mtime is None:
965 self.mtime = time.time()
966 if self.ctime is None:
967 self.ctime = time.time()
968
969 # map of legacy device types (mapping differing LD constants to new
970 # DT constants)
971 LEG_DEV_TYPE_MAP = {"lvm": constants.DT_PLAIN, "drbd8": constants.DT_DRBD8}
972 if self.dev_type in LEG_DEV_TYPE_MAP:
973 self.dev_type = LEG_DEV_TYPE_MAP[self.dev_type]
974
975 @staticmethod
976 def ComputeLDParams(disk_template, disk_params):
977 """Computes Logical Disk parameters from Disk Template parameters.
978
979 @type disk_template: string
980 @param disk_template: disk template, one of L{constants.DISK_TEMPLATES}
981 @type disk_params: dict
982 @param disk_params: disk template parameters;
983                         dict(template_name -> parameters)
984 @rtype: list(dict)
985 @return: a list of dicts, one for each node of the disk hierarchy. Each dict
986 contains the LD parameters of the node. The tree is flattened in-order.
987
988 """
989 if disk_template not in constants.DISK_TEMPLATES:
990 raise errors.ProgrammerError("Unknown disk template %s" % disk_template)
991
992 assert disk_template in disk_params
993
994 result = list()
995 dt_params = disk_params[disk_template]
996
997 if disk_template == constants.DT_DRBD8:
998 result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.DT_DRBD8], {
999 constants.LDP_RESYNC_RATE: dt_params[constants.DRBD_RESYNC_RATE],
1000 constants.LDP_BARRIERS: dt_params[constants.DRBD_DISK_BARRIERS],
1001 constants.LDP_NO_META_FLUSH: dt_params[constants.DRBD_META_BARRIERS],
1002 constants.LDP_DEFAULT_METAVG: dt_params[constants.DRBD_DEFAULT_METAVG],
1003 constants.LDP_DISK_CUSTOM: dt_params[constants.DRBD_DISK_CUSTOM],
1004 constants.LDP_NET_CUSTOM: dt_params[constants.DRBD_NET_CUSTOM],
1005 constants.LDP_PROTOCOL: dt_params[constants.DRBD_PROTOCOL],
1006 constants.LDP_DYNAMIC_RESYNC: dt_params[constants.DRBD_DYNAMIC_RESYNC],
1007 constants.LDP_PLAN_AHEAD: dt_params[constants.DRBD_PLAN_AHEAD],
1008 constants.LDP_FILL_TARGET: dt_params[constants.DRBD_FILL_TARGET],
1009 constants.LDP_DELAY_TARGET: dt_params[constants.DRBD_DELAY_TARGET],
1010 constants.LDP_MAX_RATE: dt_params[constants.DRBD_MAX_RATE],
1011 constants.LDP_MIN_RATE: dt_params[constants.DRBD_MIN_RATE],
1012 }))
1013
1014 # data LV
1015 result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.DT_PLAIN], {
1016 constants.LDP_STRIPES: dt_params[constants.DRBD_DATA_STRIPES],
1017 }))
1018
1019 # metadata LV
1020 result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.DT_PLAIN], {
1021 constants.LDP_STRIPES: dt_params[constants.DRBD_META_STRIPES],
1022 }))
1023
1024 else:
1025 defaults = constants.DISK_LD_DEFAULTS[disk_template]
1026 values = {}
1027 for field in defaults:
1028 values[field] = dt_params[field]
1029 result.append(FillDict(defaults, values))
1030
1031 return result
1032
1033
1034 class InstancePolicy(ConfigObject):
1035 """Config object representing instance policy limits dictionary.
1036
1037 Note that this object is not actually used in the config, it's just
1038 used as a placeholder for a few functions.
1039
1040 """
1041 @classmethod
1042 def UpgradeDiskTemplates(cls, ipolicy, enabled_disk_templates):
1043 """Upgrades the ipolicy configuration."""
1044 if constants.IPOLICY_DTS in ipolicy:
1045 if not set(ipolicy[constants.IPOLICY_DTS]).issubset(
1046 set(enabled_disk_templates)):
1047 ipolicy[constants.IPOLICY_DTS] = list(
1048 set(ipolicy[constants.IPOLICY_DTS]) & set(enabled_disk_templates))
1049
1050 @classmethod
1051 def CheckParameterSyntax(cls, ipolicy, check_std):
1052 """ Check the instance policy for validity.
1053
1054 @type ipolicy: dict
1055 @param ipolicy: dictionary with min/max/std specs and policies
1056 @type check_std: bool
1057 @param check_std: Whether to check std value or just assume compliance
1058 @raise errors.ConfigurationError: when the policy is not legal
1059
1060 """
1061 InstancePolicy.CheckISpecSyntax(ipolicy, check_std)
1062 if constants.IPOLICY_DTS in ipolicy:
1063 InstancePolicy.CheckDiskTemplates(ipolicy[constants.IPOLICY_DTS])
1064 for key in constants.IPOLICY_PARAMETERS:
1065 if key in ipolicy:
1066 InstancePolicy.CheckParameter(key, ipolicy[key])
1067 wrong_keys = frozenset(ipolicy.keys()) - constants.IPOLICY_ALL_KEYS
1068 if wrong_keys:
1069 raise errors.ConfigurationError("Invalid keys in ipolicy: %s" %
1070 utils.CommaJoin(wrong_keys))
1071
1072 @classmethod
1073 def _CheckIncompleteSpec(cls, spec, keyname):
1074 missing_params = constants.ISPECS_PARAMETERS - frozenset(spec.keys())
1075 if missing_params:
1076 msg = ("Missing instance specs parameters for %s: %s" %
1077 (keyname, utils.CommaJoin(missing_params)))
1078 raise errors.ConfigurationError(msg)
1079
1080 @classmethod
1081 def CheckISpecSyntax(cls, ipolicy, check_std):
1082 """Check the instance policy specs for validity.
1083
1084 @type ipolicy: dict
1085 @param ipolicy: dictionary with min/max/std specs
1086 @type check_std: bool
1087 @param check_std: Whether to check std value or just assume compliance
1088 @raise errors.ConfigurationError: when specs are not valid
1089
1090 """
1091 if constants.ISPECS_MINMAX not in ipolicy:
1092 # Nothing to check
1093 return
1094
1095 if check_std and constants.ISPECS_STD not in ipolicy:
1096 msg = "Missing key in ipolicy: %s" % constants.ISPECS_STD
1097 raise errors.ConfigurationError(msg)
1098 stdspec = ipolicy.get(constants.ISPECS_STD)
1099 if check_std:
1100 InstancePolicy._CheckIncompleteSpec(stdspec, constants.ISPECS_STD)
1101
1102 if not ipolicy[constants.ISPECS_MINMAX]:
1103 raise errors.ConfigurationError("Empty minmax specifications")
1104 std_is_good = False
1105 for minmaxspecs in ipolicy[constants.ISPECS_MINMAX]:
1106 missing = constants.ISPECS_MINMAX_KEYS - frozenset(minmaxspecs.keys())
1107 if missing:
1108 msg = "Missing instance specification: %s" % utils.CommaJoin(missing)
1109 raise errors.ConfigurationError(msg)
1110 for (key, spec) in minmaxspecs.items():
1111 InstancePolicy._CheckIncompleteSpec(spec, key)
1112
1113 spec_std_ok = True
1114 for param in constants.ISPECS_PARAMETERS:
1115 par_std_ok = InstancePolicy._CheckISpecParamSyntax(minmaxspecs, stdspec,
1116 param, check_std)
1117 spec_std_ok = spec_std_ok and par_std_ok
1118 std_is_good = std_is_good or spec_std_ok
1119 if not std_is_good:
1120 raise errors.ConfigurationError("Invalid std specifications")
1121
1122 @classmethod
1123 def _CheckISpecParamSyntax(cls, minmaxspecs, stdspec, name, check_std):
1124 """Check the instance policy specs for validity on a given key.
1125
1126     We check if the instance specs make sense for a given key, that is
1127     if minmaxspecs[min][name] <= stdspec[name] <= minmaxspecs[max][name].
1128
1129 @type minmaxspecs: dict
1130 @param minmaxspecs: dictionary with min and max instance spec
1131 @type stdspec: dict
1132 @param stdspec: dictionary with standard instance spec
1133 @type name: string
1134 @param name: what are the limits for
1135 @type check_std: bool
1136 @param check_std: Whether to check std value or just assume compliance
1137 @rtype: bool
1138 @return: C{True} when specs are valid, C{False} when standard spec for the
1139 given name is not valid
1140 @raise errors.ConfigurationError: when min/max specs for the given name
1141 are not valid
1142
1143 """
1144 minspec = minmaxspecs[constants.ISPECS_MIN]
1145 maxspec = minmaxspecs[constants.ISPECS_MAX]
1146 min_v = minspec[name]
1147 max_v = maxspec[name]
1148
1149 if min_v > max_v:
1150 err = ("Invalid specification of min/max values for %s: %s/%s" %
1151 (name, min_v, max_v))
1152 raise errors.ConfigurationError(err)
1153 elif check_std:
1154 std_v = stdspec.get(name, min_v)
1155 return std_v >= min_v and std_v <= max_v
1156 else:
1157 return True
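
  # Behaviour sketch (the numbers are made up): a std value within the
  # min/max bounds returns True, one outside returns False (the caller
  # then reports invalid std specs), and min > max raises
  # errors.ConfigurationError directly.
  #
  #   >>> mm = {"min": {"memory-size": 128}, "max": {"memory-size": 1024}}
  #   >>> InstancePolicy._CheckISpecParamSyntax(mm, {"memory-size": 512},
  #   ...                                       "memory-size", True)
  #   True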
1158
1159 @classmethod
1160 def CheckDiskTemplates(cls, disk_templates):
1161 """Checks the disk templates for validity.
1162
1163 """
1164 if not disk_templates:
1165 raise errors.ConfigurationError("Instance policy must contain" +
1166 " at least one disk template")
1167 wrong = frozenset(disk_templates).difference(constants.DISK_TEMPLATES)
1168 if wrong:
1169 raise errors.ConfigurationError("Invalid disk template(s) %s" %
1170 utils.CommaJoin(wrong))
1171
1172 @classmethod
1173 def CheckParameter(cls, key, value):
1174 """Checks a parameter.
1175
1176 Currently we expect all parameters to be float values.
1177
1178 """
1179 try:
1180 float(value)
1181 except (TypeError, ValueError), err:
1182       raise errors.ConfigurationError("Invalid value for key '%s': '%s',"
1183                                       " error: %s" % (key, value, err))
1184
1185
1186 def GetOSImage(osparams):
1187 """Gets the OS image value from the OS parameters.
1188
1189 @type osparams: L{dict} or NoneType
1190 @param osparams: OS parameters or None
1191
1192 @rtype: string or NoneType
1193 @return:
1194 value of OS image contained in OS parameters, or None if the OS
1195 parameters are None or the OS parameters do not contain an OS
1196 image
1197
1198 """
1199 if osparams is None:
1200 return None
1201 else:
1202 return osparams.get("os-image", None)
1203
1204
1205 def PutOSImage(osparams, os_image):
1206 """Update OS image value in the OS parameters
1207
1208 @type osparams: L{dict}
1209 @param osparams: OS parameters
1210
1211 @type os_image: string
1212 @param os_image: OS image
1213
1214 @rtype: NoneType
1215 @return: None
1216
1217 """
1218 osparams["os-image"] = os_image
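
# Illustrative usage (the image URL is an assumption):
#
#   >>> params = {}
#   >>> PutOSImage(params, "http://example.com/disk.img")
#   >>> GetOSImage(params)
#   'http://example.com/disk.img'
#   >>> GetOSImage(None) is None
#   True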
1219
1220
1221 class Instance(TaggableObject):
1222 """Config object representing an instance."""
1223 __slots__ = [
1224 "forthcoming",
1225 "name",
1226 "primary_node",
1227 "secondary_nodes",
1228 "os",
1229 "hypervisor",
1230 "hvparams",
1231 "beparams",
1232 "osparams",
1233 "osparams_private",
1234 "admin_state",
1235 "admin_state_source",
1236 "nics",
1237 "disks",
1238 "disks_info",
1239 "disk_template",
1240 "disks_active",
1241 "network_port",
1242 "serial_no",
1243 ] + _TIMESTAMPS + _UUID
1244
1245 def FindDisk(self, idx):
1246     """Find a disk given its index.
1247
1248 This is just a wrapper that does validation of the index.
1249
1250 @type idx: int
1251 @param idx: the disk index
1252 @rtype: string
1253 @return: the corresponding disk's uuid
1254 @raise errors.OpPrereqError: when the given index is not valid
1255
1256 """
1257 try:
1258 idx = int(idx)
1259 return self.disks[idx]
1260 except (TypeError, ValueError), err:
1261 raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err),
1262 errors.ECODE_INVAL)
1263 except IndexError:
1264       raise errors.OpPrereqError("Invalid disk index: %d (instance has disks"
1265                                  " 0 to %d)" % (idx, len(self.disks) - 1),
1266 errors.ECODE_INVAL)
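
  # Illustrative behaviour for a hypothetical Instance object "inst" whose
  # "disks" list holds disk UUIDs:
  #
  #   >>> inst.FindDisk("0")     # returns the UUID stored at index 0
  #   >>> inst.FindDisk("junk")  # raises errors.OpPrereqError (ECODE_INVAL)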
1267
1268 def ToDict(self, _with_private=False):
1269 """Instance-specific conversion to standard python types.
1270
1271 This replaces the children lists of objects with lists of standard
1272 python types.
1273
1274 """
1275 bo = super(Instance, self).ToDict(_with_private=_with_private)
1276
1277 if _with_private:
1278 bo["osparams_private"] = self.osparams_private.Unprivate()
1279
1280 for attr in ("nics",):
1281 alist = bo.get(attr, None)
1282 if alist:
1283 nlist = outils.ContainerToDicts(alist)
1284 else:
1285 nlist = []
1286 bo[attr] = nlist
1287
1288 if 'disk_template' in bo:
1289 del bo['disk_template']
1290
1291 return bo
1292
1293 @classmethod
1294 def FromDict(cls, val):
1295 """Custom function for instances.
1296
1297 """
1298 if "admin_state" not in val:
1299 if val.get("admin_up", False):
1300 val["admin_state"] = constants.ADMINST_UP
1301 else:
1302 val["admin_state"] = constants.ADMINST_DOWN
1303 if "admin_up" in val:
1304 del val["admin_up"]
1305 obj = super(Instance, cls).FromDict(val)
1306 obj.nics = outils.ContainerFromDicts(obj.nics, list, NIC)
1307
1308     # attribute 'disks_info' is only present when deserializing from an RPC
1309 # call in the backend
1310 disks_info = getattr(obj, "disks_info", None)
1311 if disks_info:
1312 obj.disks_info = outils.ContainerFromDicts(disks_info, list, Disk)
1313
1314 return obj
1315
1316 def UpgradeConfig(self):
1317 """Fill defaults for missing configuration values.
1318
1319 """
1320 if self.admin_state_source is None:
1321 self.admin_state_source = constants.ADMIN_SOURCE
1322 for nic in self.nics:
1323 nic.UpgradeConfig()
1324 if self.disks is None:
1325 self.disks = []
1326 if self.hvparams:
1327 for key in constants.HVC_GLOBALS:
1328 try:
1329 del self.hvparams[key]
1330 except KeyError:
1331 pass
1332 if self.osparams is None:
1333 self.osparams = {}
1334 if self.osparams_private is None:
1335 self.osparams_private = serializer.PrivateDict()
1336 UpgradeBeParams(self.beparams)
1337 if self.disks_active is None:
1338 self.disks_active = self.admin_state == constants.ADMINST_UP
1339
1340
1341 class OS(ConfigObject):
1342 """Config object representing an operating system.
1343
1344 @type supported_parameters: list
1345 @ivar supported_parameters: a list of tuples, name and description,
1346 containing the supported parameters by this OS
1347
1348 @type VARIANT_DELIM: string
1349 @cvar VARIANT_DELIM: the variant delimiter
1350
1351 """
1352 __slots__ = [
1353 "name",
1354 "path",
1355 "api_versions",
1356 "create_script",
1357 "create_script_untrusted",
1358 "export_script",
1359 "import_script",
1360 "rename_script",
1361 "verify_script",
1362 "supported_variants",
1363 "supported_parameters",
1364 ]
1365
1366 VARIANT_DELIM = "+"
1367
1368 @classmethod
1369 def SplitNameVariant(cls, name):
1370 """Splits the name into the proper name and variant.
1371
1372 @param name: the OS (unprocessed) name
1373 @rtype: list
1374 @return: a list of two elements; if the original name didn't
1375     @return: a list of two elements; if the original name didn't
1376       contain a variant, the variant is returned as an empty string
1377 """
1378 nv = name.split(cls.VARIANT_DELIM, 1)
1379 if len(nv) == 1:
1380 nv.append("")
1381 return nv
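
  # Illustrative example ("debootstrap" is just a sample OS name):
  #
  #   >>> OS.SplitNameVariant("debootstrap+default")
  #   ['debootstrap', 'default']
  #   >>> OS.SplitNameVariant("debootstrap")
  #   ['debootstrap', '']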
1382
1383 @classmethod
1384 def GetName(cls, name):
1385 """Returns the proper name of the os (without the variant).
1386
1387 @param name: the OS (unprocessed) name
1388
1389 """
1390 return cls.SplitNameVariant(name)[0]
1391
1392 @classmethod
1393 def GetVariant(cls, name):
1394     """Returns the variant of the OS (without the base name).
1395
1396 @param name: the OS (unprocessed) name
1397
1398 """
1399 return cls.SplitNameVariant(name)[1]
1400
1401 def IsTrusted(self):
1402 """Returns whether this OS is trusted.
1403
1404 @rtype: bool
1405 @return: L{True} if this OS is trusted, L{False} otherwise
1406
1407 """
1408 return not self.create_script_untrusted
1409
1410
1411 class ExtStorage(ConfigObject):
1412 """Config object representing an External Storage Provider.
1413
1414 """
1415 __slots__ = [
1416 "name",
1417 "path",
1418 "create_script",
1419 "remove_script",
1420 "grow_script",
1421 "attach_script",
1422 "detach_script",
1423 "setinfo_script",
1424 "verify_script",
1425 "snapshot_script",
1426 "open_script",
1427 "close_script",
1428 "supported_parameters",
1429 ]
1430
1431
1432 class NodeHvState(ConfigObject):
1433   """Hypervisor state on a node.
1434
1435 @ivar mem_total: Total amount of memory
1436 @ivar mem_node: Memory used by, or reserved for, the node itself (not always
1437 available)
1438 @ivar mem_hv: Memory used by hypervisor or lost due to instance allocation
1439 rounding
1440 @ivar mem_inst: Memory used by instances living on node
1441 @ivar cpu_total: Total node CPU core count
1442 @ivar cpu_node: Number of CPU cores reserved for the node itself
1443
1444 """
1445 __slots__ = [
1446 "mem_total",
1447 "mem_node",
1448 "mem_hv",
1449 "mem_inst",
1450 "cpu_total",
1451 "cpu_node",
1452 ] + _TIMESTAMPS
1453
1454
1455 class NodeDiskState(ConfigObject):
1456 """Disk state on a node.
1457
1458 """
1459 __slots__ = [
1460 "total",
1461 "reserved",
1462 "overhead",
1463 ] + _TIMESTAMPS
1464
1465
1466 class Node(TaggableObject):
1467 """Config object representing a node.
1468
1469 @ivar hv_state: Hypervisor state (e.g. number of CPUs)
1470   @ivar hv_state_static: Hypervisor state overridden by user
1471   @ivar disk_state: Disk state (e.g. free space)
1472   @ivar disk_state_static: Disk state overridden by user
1473
1474 """
1475 __slots__ = [
1476 "name",
1477 "primary_ip",
1478 "secondary_ip",
1479 "serial_no",
1480 "master_candidate",
1481 "offline",
1482 "drained",
1483 "group",
1484 "master_capable",
1485 "vm_capable",
1486 "ndparams",
1487 "powered",
1488 "hv_state",
1489 "hv_state_static",
1490 "disk_state",
1491 "disk_state_static",
1492 ] + _TIMESTAMPS + _UUID
1493
1494 def UpgradeConfig(self):
1495 """Fill defaults for missing configuration values.
1496
1497 """
1498 # pylint: disable=E0203
1499 # because these are "defined" via slots, not manually
1500 if self.master_capable is None:
1501 self.master_capable = True
1502
1503 if self.vm_capable is None:
1504 self.vm_capable = True
1505
1506 if self.ndparams is None:
1507 self.ndparams = {}
1508 # And remove any global parameter
1509 for key in constants.NDC_GLOBALS:
1510 if key in self.ndparams:
1511 logging.warning("Ignoring %s node parameter for node %s",
1512 key, self.name)
1513 del self.ndparams[key]
1514
1515 if self.powered is None:
1516 self.powered = True
1517
1518 if self.hv_state_static is None:
1519 self.hv_state_static = {}
1520 if self.disk_state_static is None:
1521 self.disk_state_static = {}
1522
1523 def ToDict(self, _with_private=False):
1524 """Custom function for serializing.
1525
1526 """
1527 data = super(Node, self).ToDict(_with_private=_with_private)
1528
1529 hv_state = data.get("hv_state", None)
1530 if hv_state is not None:
1531 data["hv_state"] = outils.ContainerToDicts(hv_state)
1532
1533 disk_state = data.get("disk_state", None)
1534 if disk_state is not None:
1535 data["disk_state"] = \
1536 dict((key, outils.ContainerToDicts(value))
1537 for (key, value) in disk_state.items())
1538
1539 return data
1540
1541 @classmethod
1542 def FromDict(cls, val):
1543 """Custom function for deserializing.
1544
1545 """
1546 obj = super(Node, cls).FromDict(val)
1547
1548 if obj.hv_state is not None:
1549 obj.hv_state = \
1550 outils.ContainerFromDicts(obj.hv_state, dict, NodeHvState)
1551
1552 if obj.disk_state is not None:
1553 obj.disk_state = \
1554 dict((key, outils.ContainerFromDicts(value, dict, NodeDiskState))
1555 for (key, value) in obj.disk_state.items())
1556
1557 return obj
1558
1559
1560 class NodeGroup(TaggableObject):
1561 """Config object representing a node group."""
1562 __slots__ = [
1563 "name",
1564 "members",
1565 "ndparams",
1566 "diskparams",
1567 "ipolicy",
1568 "serial_no",
1569 "hv_state_static",
1570 "disk_state_static",
1571 "alloc_policy",
1572 "networks",
1573 ] + _TIMESTAMPS + _UUID
1574
1575 def ToDict(self, _with_private=False):
1576 """Custom function for nodegroup.
1577
1578 This discards the members object, which gets recalculated and is only kept
1579 in memory.
1580
1581 """
1582 mydict = super(NodeGroup, self).ToDict(_with_private=_with_private)
1583 del mydict["members"]
1584 return mydict
1585
1586 @classmethod
1587 def FromDict(cls, val):
1588 """Custom function for nodegroup.
1589
1590 The members slot is initialized to an empty list, upon deserialization.
1591
1592 """
1593 obj = super(NodeGroup, cls).FromDict(val)
1594 obj.members = []
1595 return obj
1596
1597 def UpgradeConfig(self):
1598 """Fill defaults for missing configuration values.
1599
1600 """
1601 if self.ndparams is None:
1602 self.ndparams = {}
1603
1604 if self.serial_no is None:
1605 self.serial_no = 1
1606
1607 if self.alloc_policy is None:
1608 self.alloc_policy = constants.ALLOC_POLICY_PREFERRED
1609
1610 # We only update mtime, and not ctime, since we would not be able
1611 # to provide a correct value for creation time.
1612 if self.mtime is None:
1613 self.mtime = time.time()
1614
1615 if self.diskparams is None:
1616 self.diskparams = {}
1617 if self.ipolicy is None:
1618 self.ipolicy = MakeEmptyIPolicy()
1619
1620 if self.hv_state_static is None:
1621 self.hv_state_static = {}
1622 if self.disk_state_static is None:
1623 self.disk_state_static = {}
1624
1625 if self.networks is None:
1626 self.networks = {}
1627
1628 for network, netparams in self.networks.items():
1629 self.networks[network] = FillDict(constants.NICC_DEFAULTS, netparams)
1630
1631 def FillND(self, node):
1632 """Return filled out ndparams for L{objects.Node}
1633
1634 @type node: L{objects.Node}
1635 @param node: A Node object to fill
1636     @return: a copy of the node's ndparams with defaults filled
1637
1638 """
1639 return self.SimpleFillND(node.ndparams)
1640
1641 def SimpleFillND(self, ndparams):
1642 """Fill a given ndparams dict with defaults.
1643
1644 @type ndparams: dict
1645 @param ndparams: the dict to fill
1646 @rtype: dict
1647 @return: a copy of the passed in ndparams with missing keys filled
1648 from the node group defaults
1649
1650 """
1651 return FillDict(self.ndparams, ndparams)
1652
1653
1654 class Cluster(TaggableObject):
1655 """Config object representing the cluster."""
1656 __slots__ = [
1657 "serial_no",
1658 "rsahostkeypub",
1659 "dsahostkeypub",
1660 "highest_used_port",
1661 "tcpudp_port_pool",
1662 "mac_prefix",
1663 "volume_group_name",
1664 "reserved_lvs",
1665 "drbd_usermode_helper",
1666 "default_bridge",
1667 "default_hypervisor",
1668 "master_node",
1669 "master_ip",
1670 "master_netdev",
1671 "master_netmask",
1672 "use_external_mip_script",
1673 "cluster_name",
1674 "file_storage_dir",
1675 "shared_file_storage_dir",
1676 "gluster_storage_dir",
1677 "enabled_hypervisors",
1678 "hvparams",
1679 "ipolicy",
1680 "os_hvp",
1681 "beparams",
1682 "osparams",
1683 "osparams_private_cluster",
1684 "nicparams",
1685 "ndparams",
1686 "diskparams",
1687 "candidate_pool_size",
1688 "modify_etc_hosts",
1689 "modify_ssh_setup",
1690 "maintain_node_health",
1691 "uid_pool",
1692 "default_iallocator",
1693 "default_iallocator_params",
1694 "hidden_os",
1695 "blacklisted_os",
1696 "primary_ip_family",
1697 "prealloc_wipe_disks",
1698 "hv_state_static",
1699 "disk_state_static",
1700 "enabled_disk_templates",
1701 "candidate_certs",
1702 "max_running_jobs",
1703 "max_tracked_jobs",
1704 "install_image",
1705 "instance_communication_network",
1706 "zeroing_image",
1707 "compression_tools",
1708 "enabled_user_shutdown",
1709 "data_collectors",
1710 "diagnose_data_collector_filename",
1711 "ssh_key_type",
1712 "ssh_key_bits",
1713 "enabled_predictive_queue",
1714 ] + _TIMESTAMPS + _UUID
1715
1716 def UpgradeConfig(self):
1717 """Fill defaults for missing configuration values.
1718
1719 """
1720 # pylint: disable=E0203
1721 # because these are "defined" via slots, not manually
1722 if self.hvparams is None:
1723 self.hvparams = constants.HVC_DEFAULTS
1724 else:
1725 for hypervisor in constants.HYPER_TYPES:
1726 try:
1727 existing_params = self.hvparams[hypervisor]
1728 except KeyError:
1729 existing_params = {}
1730 self.hvparams[hypervisor] = FillDict(
1731 constants.HVC_DEFAULTS[hypervisor], existing_params)
1732
1733 if self.os_hvp is None:
1734 self.os_hvp = {}
1735
1736 if self.osparams is None:
1737 self.osparams = {}
1738 # osparams_private_cluster added in 2.12
1739 if self.osparams_private_cluster is None:
1740 self.osparams_private_cluster = {}
1741
1742 self.ndparams = UpgradeNDParams(self.ndparams)
1743
1744 self.beparams = UpgradeGroupedParams(self.beparams,
1745 constants.BEC_DEFAULTS)
1746 for beparams_group in self.beparams:
1747 UpgradeBeParams(self.beparams[beparams_group])
1748
1749 migrate_default_bridge = not self.nicparams
1750 self.nicparams = UpgradeGroupedParams(self.nicparams,
1751 constants.NICC_DEFAULTS)
1752 if migrate_default_bridge:
1753 self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
1754 self.default_bridge
1755
1756 if self.modify_etc_hosts is None:
1757 self.modify_etc_hosts = True
1758
1759 if self.modify_ssh_setup is None:
1760 self.modify_ssh_setup = True
1761
1762 # default_bridge is no longer used in 2.1. The slot is left there to
1763 # support auto-upgrading. It can be removed once we decide to deprecate
1764 # upgrading straight from 2.0.
1765 if self.default_bridge is not None:
1766 self.default_bridge = None
1767
1768 # default_hypervisor is just the first enabled one in 2.1. This slot and
1769 # code can be removed once upgrading straight from 2.0 is deprecated.
1770 if self.default_hypervisor is not None:
1771 self.enabled_hypervisors = ([self.default_hypervisor] +
1772 [hvname for hvname in self.enabled_hypervisors
1773 if hvname != self.default_hypervisor])
1774 self.default_hypervisor = None
1775
1776 # maintain_node_health added after 2.1.1
1777 if self.maintain_node_health is None:
1778 self.maintain_node_health = False
1779
1780 if self.uid_pool is None:
1781 self.uid_pool = []
1782
1783 if self.default_iallocator is None:
1784 self.default_iallocator = ""
1785
1786 if self.default_iallocator_params is None:
1787 self.default_iallocator_params = {}
1788
1789 # reserved_lvs added before 2.2
1790 if self.reserved_lvs is None:
1791 self.reserved_lvs = []
1792
1793 # hidden and blacklisted operating systems added before 2.2.1
1794 if self.hidden_os is None:
1795 self.hidden_os = []
1796
1797 if self.blacklisted_os is None:
1798 self.blacklisted_os = []
1799
1800 # primary_ip_family added before 2.3
1801 if self.primary_ip_family is None:
1802 self.primary_ip_family = AF_INET
1803
1804 if self.master_netmask is None:
1805 ipcls = netutils.IPAddress.GetClassFromIpFamily(self.primary_ip_family)
1806 self.master_netmask = ipcls.iplen
1807
1808 if self.prealloc_wipe_disks is None:
1809 self.prealloc_wipe_disks = False
1810
1811 # shared_file_storage_dir added before 2.5
1812 if self.shared_file_storage_dir is None:
1813 self.shared_file_storage_dir = ""
1814
1815 # gluster_storage_dir added in 2.11
1816 if self.gluster_storage_dir is None:
1817 self.gluster_storage_dir = ""
1818
1819 if self.use_external_mip_script is None:
1820 self.use_external_mip_script = False
1821
1822 if self.diskparams:
1823 self.diskparams = UpgradeDiskParams(self.diskparams)
1824 else:
1825 self.diskparams = constants.DISK_DT_DEFAULTS.copy()
1826
1827 # instance policy added before 2.6
1828 if self.ipolicy is None:
1829 self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, {})
1830 else:
1831       # We could either always upgrade the ipolicy here, or do it only in
1832       # corner cases (e.g. missing keys); note that always upgrading will
1833       # break any removal of keys from the ipolicy dict.
1834 wrongkeys = frozenset(self.ipolicy.keys()) - constants.IPOLICY_ALL_KEYS
1835 if wrongkeys:
1836 # These keys would be silently removed by FillIPolicy()
1837 msg = ("Cluster instance policy contains spurious keys: %s" %
1838 utils.CommaJoin(wrongkeys))
1839 raise errors.ConfigurationError(msg)
1840 self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, self.ipolicy)
1841
1842 # hv_state_static added in 2.7
1843 if self.hv_state_static is None:
1844 self.hv_state_static = {}
1845 if self.disk_state_static is None:
1846 self.disk_state_static = {}
1847
1848 if self.candidate_certs is None:
1849 self.candidate_certs = {}
1850
1851 if self.max_running_jobs is None:
1852 self.max_running_jobs = constants.LUXID_MAXIMAL_RUNNING_JOBS_DEFAULT
1853
1854 if self.max_tracked_jobs is None:
1855 self.max_tracked_jobs = constants.LUXID_MAXIMAL_TRACKED_JOBS_DEFAULT
1856
1857 if self.instance_communication_network is None:
1858 self.instance_communication_network = ""
1859
1860 if self.install_image is None:
1861 self.install_image = ""
1862
1863 if self.compression_tools is None:
1864 self.compression_tools = constants.IEC_DEFAULT_TOOLS
1865
1866 if self.enabled_user_shutdown is None:
1867 self.enabled_user_shutdown = False
1868
1869 if self.ssh_key_type is None:
1870 self.ssh_key_type = constants.SSH_DEFAULT_KEY_TYPE
1871
1872 if self.ssh_key_bits is None:
1873 self.ssh_key_bits = constants.SSH_DEFAULT_KEY_BITS
1874
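  # Illustrative sketch only (not executed): how UpgradeConfig() above is
  # expected to behave when loading an older configuration; the input dict
  # and attribute values are assumptions for the example.
  #
  #   cluster = Cluster.FromDict(old_config_dict)
  #   cluster.UpgradeConfig()
  #   # missing hypervisor parameters are now filled from HVC_DEFAULTS
  #   assert set(constants.HYPER_TYPES) <= set(cluster.hvparams)
  #   # a spurious instance policy key is rejected instead of being
  #   # silently dropped by FillIPolicy()
  #   cluster.ipolicy["no-such-key"] = 1
  #   cluster.UpgradeConfig()  # raises errors.ConfigurationError
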
1875 @property
1876 def primary_hypervisor(self):
1877 """The first hypervisor is the primary.
1878
1879 Useful, for example, for L{Node}'s hv/disk state.
1880
1881 """
1882 return self.enabled_hypervisors[0]
1883
1884 def ToDict(self, _with_private=False):
1885     """Custom serialization for the cluster object.
1886
1887 """
1888 mydict = super(Cluster, self).ToDict(_with_private=_with_private)
1889
1890 # Explicitly save private parameters.
1891 if _with_private:
1892 for os in mydict["osparams_private_cluster"]:
1893 mydict["osparams_private_cluster"][os] = \
1894 self.osparams_private_cluster[os].Unprivate()
1895
1896 if self.tcpudp_port_pool is None:
1897 tcpudp_port_pool = []
1898 else:
1899 tcpudp_port_pool = list(self.tcpudp_port_pool)
1900
1901 mydict["tcpudp_port_pool"] = tcpudp_port_pool
1902
1903 return mydict
1904
1905 @classmethod
1906 def FromDict(cls, val):
1907     """Custom de-serialization for the cluster object.
1908
1909 """
1910 obj = super(Cluster, cls).FromDict(val)
1911
1912 if obj.tcpudp_port_pool is None:
1913 obj.tcpudp_port_pool = set()
1914 elif not isinstance(obj.tcpudp_port_pool, set):
1915 obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)
1916
1917 return obj
1918
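  # Illustrative sketch only: ToDict()/FromDict() above round-trip the
  # TCP/UDP port pool as a list on the wire and a set in memory (the port
  # numbers are assumptions for the example).
  #
  #   cluster.tcpudp_port_pool = set([11000, 11001])
  #   data = cluster.ToDict()       # data["tcpudp_port_pool"] is a list
  #   clone = Cluster.FromDict(data)
  #   assert clone.tcpudp_port_pool == set([11000, 11001])
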
1919 def SimpleFillDP(self, diskparams):
1920 """Fill a given diskparams dict with cluster defaults.
1921
1922     @param diskparams: the dict to fill
1923     @return: the given diskparams with cluster-level defaults filled in
1924
1925 """
1926 return FillDiskParams(self.diskparams, diskparams)
1927
1928 def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
1929 """Get the default hypervisor parameters for the cluster.
1930
1931 @param hypervisor: the hypervisor name
1932     @param os_name: if specified, the per-OS overrides for this OS are also applied
1933 @param skip_keys: if passed, list of keys not to use
1934 @return: the defaults dict
1935
1936 """
1937 if skip_keys is None:
1938 skip_keys = []
1939
1940 fill_stack = [self.hvparams.get(hypervisor, {})]
1941 if os_name is not None:
1942 os_hvp = self.os_hvp.get(os_name, {}).get(hypervisor, {})
1943 fill_stack.append(os_hvp)
1944
1945 ret_dict = {}
1946 for o_dict in fill_stack:
1947 ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys)
1948
1949 return ret_dict
1950
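  # Illustrative sketch only: GetHVDefaults() layers the per-OS overrides
  # (os_hvp) on top of the cluster-wide hvparams; the hypervisor, OS and
  # parameter names below are assumptions for the example.
  #
  #   cluster.hvparams = {"kvm": {"kernel_path": "/boot/vmlinuz",
  #                               "acpi": True}}
  #   cluster.os_hvp = {"debian": {"kvm": {"acpi": False}}}
  #   cluster.GetHVDefaults("kvm", os_name="debian")
  #   # -> {"kernel_path": "/boot/vmlinuz", "acpi": False}
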
1951 def SimpleFillHV(self, hv_name, os_name, hvparams, skip_globals=False):
1952 """Fill a given hvparams dict with cluster defaults.
1953
1954 @type hv_name: string
1955 @param hv_name: the hypervisor to use
1956 @type os_name: string
1957     @param os_name: the OS to use for overriding the hypervisor defaults
    @type hvparams: dict
    @param hvparams: the dict to fill
1958 @type skip_globals: boolean
1959 @param skip_globals: if True, the global hypervisor parameters will
1960 not be filled
1961 @rtype: dict
1962 @return: a copy of the given hvparams with missing keys filled from
1963 the cluster defaults
1964
1965 """
1966 if skip_globals:
1967 skip_keys = constants.HVC_GLOBALS
1968 else:
1969 skip_keys = []
1970
1971 def_dict = self.GetHVDefaults(hv_name, os_name, skip_keys=skip_keys)
1972 return FillDict(def_dict, hvparams, skip_keys=skip_keys)
1973
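  # Illustrative sketch only: with skip_globals=True the keys listed in
  # constants.HVC_GLOBALS are neither filled from the defaults nor copied
  # from the caller's dict, so per-instance parameters cannot override
  # cluster-global hypervisor settings (parameter names are assumptions).
  #
  #   filled = cluster.SimpleFillHV("kvm", "debian",
  #                                 {"acpi": False, "migration_port": 4444},
  #                                 skip_globals=True)
  #   # "migration_port" is dropped from the result if it is a global
  #   # parameter; "acpi" and the remaining defaults are kept.
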
1974 def FillHV(self, instance, skip_globals=False):
1975 """Fill an instance's hvparams dict with cluster defaults.
1976
1977 @type instance: L{objects.Instance}
1978 @param instance: the instance parameter to fill
1979 @type skip_globals: boolean
1980 @param skip_globals: if True, the global hypervisor parameters will
1981 not be filled
1982 @rtype: dict
1983 @return: a copy of the instance's hvparams with missing keys filled from
1984 the cluster defaults
1985
1986 """
1987 return self.SimpleFillHV(instance.hypervisor, instance.os,
1988 instance.hvparams, skip_globals)
1989
1990 def SimpleFillBE(self, beparams):
1991 """Fill a given beparams dict with cluster defaults.
1992
1993 @type beparams: dict
1994 @param beparams: the dict to fill
1995 @rtype: dict
1996 @return: a copy of the passed in beparams with missing keys filled
1997 from the cluster defaults
1998
1999 """
2000 return FillDict(self.beparams.get(constants.PP_DEFAULT, {}), beparams)
2001
2002 def FillBE(self, instance):
2003 """Fill an instance's beparams dict with cluster defaults.
2004
2005 @type instance: L{objects.Instance}
2006 @param instance: the instance parameter to fill
2007 @rtype: dict
2008 @return: a copy of the instance's beparams with missing keys filled from
2009 the cluster defaults
2010
2011 """
2012 return self.SimpleFillBE(instance.beparams)
2013
2014 def SimpleFillNIC(self, nicparams):
2015 """Fill a given nicparams dict with cluster defaults.
2016
2017 @type nicparams: dict
2018 @param nicparams: the dict to fill
2019 @rtype: dict
2020 @return: a copy of the passed in nicparams with missing keys filled
2021 from the cluster defaults
2022
2023 """
2024 return FillDict(self.nicparams.get(constants.PP_DEFAULT, {}), nicparams)
2025
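  # Illustrative sketch only: be/nic parameters are grouped, and the
  # constants.PP_DEFAULT group holds the cluster-wide defaults that
  # SimpleFillBE()/SimpleFillNIC() fall back to (values are assumptions).
  #
  #   cluster.nicparams = {constants.PP_DEFAULT: {"mode": "bridged",
  #                                               "link": "br0"}}
  #   cluster.SimpleFillNIC({"link": "br1"})
  #   # -> {"mode": "bridged", "link": "br1"}
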
2026 def SimpleFillOS(self, os_name,
2027 os_params_public,
2028 os_params_private=None,
2029 os_params_secret=None):
2030 """Fill an instance's osparams dict with cluster defaults.
2031
2032 @type os_name: string
2033 @param os_name: the OS name to use
2034 @type os_params_public: dict
2035 @param os_params_public: the dict to fill with default values
2036 @type os_params_private: dict
2037 @param os_params_private: the dict with private fields to fill
2038 with default values. Not passing this field
2039 results in no private fields being added to the
2040 return value. Private fields will be wrapped in
2041 L{Private} objects.
2042 @type os_params_secret: dict
2043 @param os_params_secret: the dict with secret fields to fill
2044 with default values. Not passing this field
2045 results in no secret fields being added to the
2046                              return value. Secret fields will be wrapped in
2047 L{Private} objects.
2048 @rtype: dict
2049 @return: a copy of the instance's osparams with missing keys filled from
2050 the cluster defaults. Private and secret parameters are not included
2051 unless the respective optional parameters are supplied.
2052
2053 """
2054 if os_name is None:
2055 name_only = None
2056 else:
2057 name_only = OS.GetName(os_name)
2058
2059 defaults_base_public = self.osparams.get(name_only, {})
2060 defaults_public = FillDict(defaults_base_public,
2061 self.osparams.get(os_name, {}))
2062 params_public = FillDict(defaults_public, os_params_public)
2063
2064 if os_params_private is not None:
2065 defaults_base_private = self.osparams_private_cluster.get(name_only, {})
2066 defaults_private = FillDict(defaults_base_private,
2067 self.osparams_private_cluster.get(os_name,
2068 {}))
2069 params_private = FillDict(defaults_private, os_params_private)
2070 else:
2071 params_private = {}
2072
2073 if os_params_secret is not None:
2074 # There can't be default secret settings, so there's nothing to be done.
2075 params_secret = os_params_secret
2076 else:
2077 params_secret = {}
2078
2079     # Enforce that the sets of keys are disjoint:
2080 duplicate_keys = utils.GetRepeatedKeys(params_public,
2081 params_private,
2082 params_secret)
2083 if not duplicate_keys:
2084
2085 # Actually update them:
2086 params_public.update(params_private)
2087 params_public.update(params_secret)
2088
2089 return params_public
2090
2091 else:
2092
2093 def formatter(keys):
2094 return utils.CommaJoin(sorted(map(repr, keys))) if keys else "(none)"
2095
2096       # Lose the values; only the key sets matter when reporting conflicts.
2097 params_public = set(params_public)
2098 params_private = set(params_private)
2099 params_secret = set(params_secret)
2100
2101 msg = """Cannot assign multiple values to OS parameters.
2102
2103 Conflicting OS parameters that would have been set by this operation:
2104 - at public visibility: {public}
2105 - at private visibility: {private}
2106 - at secret visibility: {secret}
2107 """.format(public=formatter(params_public & duplicate_keys),
2108 private=formatter(params_private & duplicate_keys),
2109 secret=formatter(params_secret & duplicate_keys))
2110 raise errors.OpPrereqError(msg)
2111
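  # Illustrative sketch only: SimpleFillOS() merges public, private and
  # secret OS parameters into one dict and rejects a key that appears at
  # more than one visibility level (OS and parameter names are assumptions).
  #
  #   cluster.osparams = {"debian": {"mirror": "deb.example.com"}}
  #   params = cluster.SimpleFillOS("debian+default", {"suite": "bookworm"},
  #                                 os_params_private={"api_key": "x"})
  #   # params contains "mirror" (variant-independent default), "suite"
  #   # and the private "api_key"
  #   cluster.SimpleFillOS("debian+default", {"api_key": "x"},
  #                        os_params_private={"api_key": "y"})
  #   # raises errors.OpPrereqError: "api_key" given at two visibilities
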
2112 @staticmethod
2113 def SimpleFillHvState(hv_state):
2114 """Fill an hv_state sub dict with cluster defaults.
2115
2116 """
2117 return FillDict(constants.HVST_DEFAULTS, hv_state)
2118
2119 @staticmethod
2120 def SimpleFillDiskState(disk_state):
2121     """Fill a disk_state sub dict with cluster defaults.
2122
2123 """
2124 return FillDict(constants.DS_DEFAULTS, disk_state)
2125
2126 def FillND(self, node, nodegroup):
2127 """Return filled out ndparams for L{objects.NodeGroup} and L{objects.Node}
2128
2129 @type node: L{objects.Node}
2130 @param node: A Node object to fill
2131 @type nodegroup: L{objects.NodeGroup}
2132     @param nodegroup: A NodeGroup object to fill
2133     @return: a copy of the node's ndparams with defaults filled
2134
2135 """
2136 return self.SimpleFillND(nodegroup.FillND(node))
2137
2138 def FillNDGroup(self, nodegroup):
2139 """Return filled out ndparams for just L{objects.NodeGroup}
2140
2141 @type nodegroup: L{objects.NodeGroup}
2142     @param nodegroup: A NodeGroup object to fill
2143     @return: a copy of the node group's ndparams with defaults filled
2144
2145 """
2146 return self.SimpleFillND(nodegroup.SimpleFillND({}))
2147
2148 def SimpleFillND(self, ndparams):
2149 """Fill a given ndparams dict with defaults.
2150
2151 @type ndparams: dict
2152 @param ndparams: the dict to fill
2153 @rtype: dict
2154 @return: a copy of the passed in ndparams with missing keys filled
2155 from the cluster defaults
2156
2157 """
2158 return FillDict(self.ndparams, ndparams)
2159
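  # Illustrative sketch only: node parameters are resolved in three layers,
  # cluster -> node group -> node, each layer overriding the previous one
  # (parameter names and values are assumptions).
  #
  #   cluster.ndparams = {"oob_program": "", "spindle_count": 1}
  #   group.ndparams = {"spindle_count": 2}   # an objects.NodeGroup
  #   node.ndparams = {}                      # an objects.Node
  #   cluster.FillND(node, group)
  #   # -> {"oob_program": "", "spindle_count": 2}
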
2160 def SimpleFillIPolicy(self, ipolicy):
2161     """Fill an instance policy dict with defaults.
2162
2163 @type ipolicy: dict
2164 @param ipolicy: the dict to fill
2165 @rtype: dict
2166 @return: a copy of passed ipolicy with missing keys filled from
2167 the cluster defaults
2168
2169 """
2170 return FillIPolicy(self.ipolicy, ipolicy)
2171
2172 def IsDiskTemplateEnabled(self, disk_template):
2173 """Checks if a particular disk template is enabled.
2174
2175 """
2176 return utils.storage.IsDiskTemplateEnabled(
2177 disk_template, self.enabled_disk_templates)
2178
2179 def IsFileStorageEnabled(self):
2180 """Checks if file storage is enabled.
2181
2182 """
2183 return utils.storage.IsFileStorageEnabled(self.enabled_disk_templates)
2184
2185 def IsSharedFileStorageEnabled(self):
2186 """Checks if shared file storage is enabled.
2187
2188 """
2189 return utils.storage.IsSharedFileStorageEnabled(
2190 self.enabled_disk_templates)
2191
2192
2193 class BlockDevStatus(ConfigObject):
2194 """Config object representing the status of a block device."""
2195 __slots__ = [
2196 "dev_path",
2197 "major",
2198 "minor",
2199 "sync_percent",
2200 "estimated_time",
2201 "is_degraded",
2202 "ldisk_status",
2203 ]
2204
2205
2206 class ImportExportStatus(ConfigObject):
2207 """Config object representing the status of an import or export."""
2208 __slots__ = [
2209 "recent_output",
2210 "listen_port",
2211 "connected",
2212 "progress_mbytes",
2213 "progress_throughput",
2214 "progress_eta",
2215 "progress_percent",
2216 "exit_status",
2217 "error_message",
2218 ] + _TIMESTAMPS
2219
2220
2221 class ImportExportOptions(ConfigObject):
2222   """Options for the import/export daemon.
2223
2224 @ivar key_name: X509 key name (None for cluster certificate)
2225 @ivar ca_pem: Remote peer CA in PEM format (None for cluster certificate)
2226 @ivar compress: Compression tool to use
2227 @ivar magic: Used to ensure the connection goes to the right disk
2228 @ivar ipv6: Whether to use IPv6
2229 @ivar connect_timeout: Number of seconds for establishing connection
2230
2231 """
2232 __slots__ = [
2233 "key_name",
2234 "ca_pem",
2235 "compress",
2236 "magic",
2237 "ipv6",
2238 "connect_timeout",
2239 ]
2240
2241
2242 class ConfdRequest(ConfigObject):
2243 """Object holding a confd request.
2244
2245 @ivar protocol: confd protocol version
2246 @ivar type: confd query type
2247 @ivar query: query request
2248 @ivar rsalt: requested reply salt
2249
2250 """
2251 __slots__ = [
2252 "protocol",
2253 "type",
2254 "query",
2255 "rsalt",
2256 ]
2257
2258
2259 class ConfdReply(ConfigObject):
2260 """Object holding a confd reply.
2261
2262 @ivar protocol: confd protocol version
2263 @ivar status: reply status code (ok, error)
2264 @ivar answer: confd query reply
2265 @ivar serial: configuration serial number
2266
2267 """
2268 __slots__ = [
2269 "protocol",
2270 "status",
2271 "answer",
2272 "serial",
2273 ]
2274
2275
2276 class QueryFieldDefinition(ConfigObject):
2277 """Object holding a query field definition.
2278
2279 @ivar name: Field name
2280 @ivar title: Human-readable title
2281 @ivar kind: Field type
2282 @ivar doc: Human-readable description
2283
2284 """
2285 __slots__ = [
2286 "name",
2287 "title",
2288 "kind",
2289 "doc",
2290 ]
2291
2292
2293 class _QueryResponseBase(ConfigObject):
2294 __slots__ = [
2295 "fields",
2296 ]
2297
2298 def ToDict(self, _with_private=False):
2299 """Custom function for serializing.
2300
2301 """
2302 mydict = super(_QueryResponseBase, self).ToDict()
2303 mydict["fields"] = outils.ContainerToDicts(mydict["fields"])
2304 return mydict
2305
2306 @classmethod
2307 def FromDict(cls, val):
2308 """Custom function for de-serializing.
2309
2310 """
2311 obj = super(_QueryResponseBase, cls).FromDict(val)
2312 obj.fields = \
2313 outils.ContainerFromDicts(obj.fields, list, QueryFieldDefinition)
2314 return obj
2315
2316
2317 class QueryResponse(_QueryResponseBase):
2318 """Object holding the response to a query.
2319
2320 @ivar fields: List of L{QueryFieldDefinition} objects
2321 @ivar data: Requested data
2322
2323 """
2324 __slots__ = [
2325 "data",
2326 ]
2327
2328
2329 class QueryFieldsRequest(ConfigObject):
2330 """Object holding a request for querying available fields.
2331
2332 """
2333 __slots__ = [
2334 "what",
2335 "fields",
2336 ]
2337
2338
2339 class QueryFieldsResponse(_QueryResponseBase):
2340 """Object holding the response to a query for fields.
2341
2342 @ivar fields: List of L{QueryFieldDefinition} objects
2343
2344 """
2345 __slots__ = []
2346
2347
2348 class MigrationStatus(ConfigObject):
2349 """Object holding the status of a migration.
2350
2351 """
2352 __slots__ = [
2353 "status",
2354 "transferred_ram",
2355 "total_ram",
2356 ]
2357
2358
2359 class InstanceConsole(ConfigObject):
2360 """Object describing how to access the console of an instance.
2361
2362 """
2363 __slots__ = [
2364 "instance",
2365 "kind",
2366 "message",
2367 "host",
2368 "port",
2369 "user",
2370 "command",
2371 "display",
2372 ]
2373
2374 def Validate(self):
2375 """Validates contents of this object.
2376
2377 """
2378 assert self.kind in constants.CONS_ALL, "Unknown console type"
2379 assert self.instance, "Missing instance name"
2380 assert self.message or self.kind in [constants.CONS_SSH,
2381 constants.CONS_SPICE,
2382 constants.CONS_VNC]
2383 assert self.host or self.kind == constants.CONS_MESSAGE
2384 assert self.port or self.kind in [constants.CONS_MESSAGE,
2385 constants.CONS_SSH]
2386 assert self.user or self.kind in [constants.CONS_MESSAGE,
2387 constants.CONS_SPICE,
2388 constants.CONS_VNC]
2389 assert self.command or self.kind in [constants.CONS_MESSAGE,
2390 constants.CONS_SPICE,
2391 constants.CONS_VNC]
2392 assert self.display or self.kind in [constants.CONS_MESSAGE,
2393 constants.CONS_SPICE,
2394 constants.CONS_SSH]
2395
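  # Illustrative sketch only: a minimal SSH console object that satisfies
  # Validate(); host, user and command values are assumptions.
  #
  #   console = InstanceConsole(instance="inst1.example.com",
  #                             kind=constants.CONS_SSH,
  #                             host="node1.example.com",
  #                             user="root",
  #                             command=["ssh", "root@node1.example.com"])
  #   console.Validate()
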
2396
2397 class Network(TaggableObject):
2398 """Object representing a network definition for ganeti.
2399
2400 """
2401 __slots__ = [
2402 "name",
2403 "serial_no",
2404 "mac_prefix",
2405 "network",
2406 "network6",
2407 "gateway",
2408 "gateway6",
2409 "reservations",
2410 "ext_reservations",
2411 ] + _TIMESTAMPS + _UUID
2412
2413 def HooksDict(self, prefix=""):
2414 """Export a dictionary used by hooks with a network's information.
2415
2416 @type prefix: String
2417 @param prefix: Prefix to prepend to the dict entries
2418
2419 """
2420 result = {
2421 "%sNETWORK_NAME" % prefix: self.name,
2422 "%sNETWORK_UUID" % prefix: self.uuid,
2423 "%sNETWORK_TAGS" % prefix: " ".join(self.GetTags()),
2424 }
2425 if self.network:
2426 result["%sNETWORK_SUBNET" % prefix] = self.network
2427 if self.gateway:
2428 result["%sNETWORK_GATEWAY" % prefix] = self.gateway
2429 if self.network6:
2430 result["%sNETWORK_SUBNET6" % prefix] = self.network6
2431 if self.gateway6:
2432 result["%sNETWORK_GATEWAY6" % prefix] = self.gateway6
2433 if self.mac_prefix:
2434 result["%sNETWORK_MAC_PREFIX" % prefix] = self.mac_prefix
2435
2436 return result
2437
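  # Illustrative sketch only: for a network named "net1" exported with
  # prefix "NEW_", the resulting hooks environment would look roughly like
  # (addresses are assumptions):
  #
  #   {"NEW_NETWORK_NAME": "net1",
  #    "NEW_NETWORK_UUID": "...",
  #    "NEW_NETWORK_TAGS": "",
  #    "NEW_NETWORK_SUBNET": "192.0.2.0/24",
  #    "NEW_NETWORK_GATEWAY": "192.0.2.1"}
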
2438 @classmethod
2439 def FromDict(cls, val):
2440 """Custom function for networks.
2441
2442 Remove deprecated network_type and family.
2443
2444 """
2445 if "network_type" in val:
2446 del val["network_type"]
2447 if "family" in val:
2448 del val["family"]
2449 obj = super(Network, cls).FromDict(val)
2450 return obj
2451
2452
2453 # need to inherit object in order to use super()
2454 class SerializableConfigParser(ConfigParser.SafeConfigParser, object):
2455   """Simple wrapper over ConfigParser that allows serialization.
2456
2457 This class is basically ConfigParser.SafeConfigParser with two
2458 additional methods that allow it to serialize/unserialize to/from a
2459 buffer.
2460
2461 """
2462 def Dumps(self):
2463 """Dump this instance and return the string representation."""
2464 buf = StringIO()
2465 self.write(buf)
2466 return buf.getvalue()
2467
2468 @classmethod
2469 def Loads(cls, data):
2470 """Load data from a string."""
2471 buf = StringIO(data)
2472 cfp = cls()
2473 cfp.readfp(buf)
2474 return cfp
2475
2476 def get(self, section, option, **kwargs):
2477 value = None
2478 try:
2479 value = super(SerializableConfigParser, self).get(section, option,
2480 **kwargs)
2481 if value.lower() == constants.VALUE_NONE:
2482 value = None
2483     except ConfigParser.NoOptionError:
      # Optional per-disk/per-NIC options (disk<N>_name, nic<N>_network,
      # nic<N>_vlan) may legitimately be absent; report them as unset
      # instead of re-raising.
2484       r = re.compile(r"(disk|nic)\d+_name|nic\d+_(network|vlan)")
      if not r.match(option):
        raise
2490
2491 return value
2492
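# Illustrative sketch only: Dumps()/Loads() turn a SerializableConfigParser
# into a plain string and back; the section and option names below are
# assumptions for the example.
#
#   cfp = SerializableConfigParser()
#   cfp.add_section("export")
#   cfp.set("export", "version", "0")
#   data = cfp.Dumps()                  # "[export]\nversion = 0\n"
#   clone = SerializableConfigParser.Loads(data)
#   assert clone.get("export", "version") == "0"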
2493
2494 class LvmPvInfo(ConfigObject):
2495 """Information about an LVM physical volume (PV).
2496
2497 @type name: string
2498 @ivar name: name of the PV
2499 @type vg_name: string
2500 @ivar vg_name: name of the volume group containing the PV
2501 @type size: float
2502 @ivar size: size of the PV in MiB
2503 @type free: float
2504 @ivar free: free space in the PV, in MiB
2505 @type attributes: string
2506 @ivar attributes: PV attributes
2507 @type lv_list: list of strings
2508 @ivar lv_list: names of the LVs hosted on the PV
2509 """
2510 __slots__ = [
2511 "name",
2512 "vg_name",
2513 "size",
2514 "free",
2515 "attributes",
2516 "lv_list"
2517 ]
2518
2519 def IsEmpty(self):
2520 """Is this PV empty?
2521
2522 """
2523 return self.size <= (self.free + 1)
2524
2525 def IsAllocatable(self):
2526 """Is this PV allocatable?
2527
2528 """
2529 return ("a" in self.attributes)
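

# Illustrative sketch only: IsEmpty() allows up to 1 MiB of metadata
# overhead, and IsAllocatable() checks the "a" flag in the PV attribute
# string (device names and sizes are assumptions).
#
#   pv = LvmPvInfo(name="/dev/sda2", vg_name="xenvg", size=10240.0,
#                  free=10239.5, attributes="a--", lv_list=[])
#   assert pv.IsEmpty() and pv.IsAllocatable()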