[ganeti-github.git] / lib / objects.py
1 #
2 #
3
4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc.
5 # All rights reserved.
6 #
7 # Redistribution and use in source and binary forms, with or without
8 # modification, are permitted provided that the following conditions are
9 # met:
10 #
11 # 1. Redistributions of source code must retain the above copyright notice,
12 # this list of conditions and the following disclaimer.
13 #
14 # 2. Redistributions in binary form must reproduce the above copyright
15 # notice, this list of conditions and the following disclaimer in the
16 # documentation and/or other materials provided with the distribution.
17 #
18 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
19 # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
20 # TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
21 # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
22 # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
23 # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
24 # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
25 # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
26 # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
27 # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28 # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
30
31 """Transportable objects for Ganeti.
32
33 This module provides small, mostly data-only objects which are safe to
34 pass to and from external parties.
35
36 """
37
38 # pylint: disable=E0203,E0237,W0201,R0902
39
40 # E0203: Access to member %r before its definition, since we use
41 # objects.py which doesn't explicitly initialise its members
42
43 # E0237: Assigning to attribute not defined in class slots. pylint doesn't
44 # appear to notice many of the slots defined in __slots__ for several objects.
45
46 # W0201: Attribute '%s' defined outside __init__
47
48 # R0902: Allow instances of these objects to have more than 20 attributes
49
50 import ConfigParser
51 import re
52 import copy
53 import logging
54 import time
55 from cStringIO import StringIO
56 from socket import AF_INET
57
58 from ganeti import errors
59 from ganeti import constants
60 from ganeti import netutils
61 from ganeti import outils
62 from ganeti import utils
63 from ganeti import serializer
64
65
66 __all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
67 "OS", "Node", "NodeGroup", "Cluster", "FillDict", "Network",
68 "Filter", "Maintenance"]
69
70 _TIMESTAMPS = ["ctime", "mtime"]
71 _UUID = ["uuid"]
72
73
74 def FillDict(defaults_dict, custom_dict, skip_keys=None):
75 """Basic function to apply settings on top a default dict.
76
77 @type defaults_dict: dict
78 @param defaults_dict: dictionary holding the default values
79 @type custom_dict: dict
80 @param custom_dict: dictionary holding customized values
81 @type skip_keys: list
82 @param skip_keys: which keys not to fill
83 @rtype: dict
84 @return: dict with the 'full' values
85
86 """
87 ret_dict = copy.deepcopy(defaults_dict)
88 ret_dict.update(custom_dict)
89 if skip_keys:
90 for k in skip_keys:
91 if k in ret_dict:
92 del ret_dict[k]
93 return ret_dict
94
95
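# Illustrative sketch (added for clarity, not part of the original module):
# FillDict layers the customized values over the defaults and then drops any
# skip_keys from the result.  The helper and its example dicts below are
# hypothetical.
def _ExampleFillDict():
  """Demonstrate FillDict semantics on plain dicts."""
  defaults = {"mode": "bridged", "link": "br0", "vlan": ""}
  custom = {"link": "br1"}
  filled = FillDict(defaults, custom, skip_keys=["vlan"])
  # customized values win, skipped keys are removed from the result
  assert filled == {"mode": "bridged", "link": "br1"}
  return filled
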
96 def FillIPolicy(default_ipolicy, custom_ipolicy):
97 """Fills an instance policy with defaults.
98
99 """
100 assert frozenset(default_ipolicy.keys()) == constants.IPOLICY_ALL_KEYS
101 ret_dict = copy.deepcopy(custom_ipolicy)
102 for key in default_ipolicy:
103 if key not in ret_dict:
104 ret_dict[key] = copy.deepcopy(default_ipolicy[key])
105 elif key == constants.ISPECS_STD:
106 ret_dict[key] = FillDict(default_ipolicy[key], ret_dict[key])
107 return ret_dict
108
109
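# Illustrative sketch (added for clarity, not part of the original module):
# FillIPolicy keeps the keys already present in the custom policy and copies
# the missing ones from the defaults; only the ISPECS_STD sub-dict is merged
# key by key.  It is assumed here that constants.IPOLICY_DEFAULTS covers all
# of IPOLICY_ALL_KEYS, as the cluster upgrade code later in this module
# relies on.
def _ExampleFillIPolicy():
  """Demonstrate filling a partial instance policy from the defaults."""
  custom = {constants.IPOLICY_DTS: [constants.DT_PLAIN]}
  filled = FillIPolicy(constants.IPOLICY_DEFAULTS, custom)
  # the explicitly set key is preserved, everything else comes from defaults
  assert filled[constants.IPOLICY_DTS] == [constants.DT_PLAIN]
  assert frozenset(filled.keys()) == constants.IPOLICY_ALL_KEYS
  return filled
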
110 def FillDiskParams(default_dparams, custom_dparams, skip_keys=None):
111 """Fills the disk parameter defaults.
112
113 @see: L{FillDict} for parameters and return value
114
115 """
116 return dict((dt, FillDict(default_dparams.get(dt, {}),
117 custom_dparams.get(dt, {}),
118 skip_keys=skip_keys))
119 for dt in constants.DISK_TEMPLATES)
120
121
122 def UpgradeGroupedParams(target, defaults):
123 """Update all groups for the target parameter.
124
125 @type target: dict of dicts
126 @param target: {group: {parameter: value}}
127 @type defaults: dict
128 @param defaults: default parameter values
129
130 """
131 if target is None:
132 target = {constants.PP_DEFAULT: defaults}
133 else:
134 for group in target:
135 target[group] = FillDict(defaults, target[group])
136 return target
137
138
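# Illustrative sketch (added for clarity, not part of the original module):
# UpgradeGroupedParams either creates a single default group or completes
# every existing group with the default values.  The parameter dicts are
# hypothetical.
def _ExampleUpgradeGroupedParams():
  """Demonstrate group-wise filling of parameter defaults."""
  defaults = {"a": 1, "b": 2}
  # no groups yet: a single default group is created
  assert UpgradeGroupedParams(None, defaults) == \
      {constants.PP_DEFAULT: defaults}
  # existing groups are filled, explicitly set values are kept
  upgraded = UpgradeGroupedParams({"grp1": {"b": 7}}, defaults)
  assert upgraded == {"grp1": {"a": 1, "b": 7}}
  return upgraded
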
139 def UpgradeBeParams(target):
140 """Update the be parameters dict to the new format.
141
142 @type target: dict
143 @param target: "be" parameters dict
144
145 """
146 if constants.BE_MEMORY in target:
147 memory = target[constants.BE_MEMORY]
148 target[constants.BE_MAXMEM] = memory
149 target[constants.BE_MINMEM] = memory
150 del target[constants.BE_MEMORY]
151
152
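# Illustrative sketch (added for clarity, not part of the original module):
# UpgradeBeParams rewrites the legacy single "memory" value into the newer
# maxmem/minmem pair, modifying the dict in place.
def _ExampleUpgradeBeParams():
  """Demonstrate the in-place upgrade of a legacy beparams dict."""
  beparams = {constants.BE_MEMORY: 512}
  UpgradeBeParams(beparams)
  assert constants.BE_MEMORY not in beparams
  assert beparams[constants.BE_MAXMEM] == 512
  assert beparams[constants.BE_MINMEM] == 512
  return beparams
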
153 def UpgradeDiskParams(diskparams):
154 """Upgrade the disk parameters.
155
156 @type diskparams: dict
157 @param diskparams: disk parameters to upgrade
158 @rtype: dict
159 @return: the upgraded disk parameters dict
160
161 """
162 if not diskparams:
163 result = {}
164 else:
165 result = FillDiskParams(constants.DISK_DT_DEFAULTS, diskparams)
166
167 return result
168
169
170 def UpgradeNDParams(ndparams):
171 """Upgrade ndparams structure.
172
173 @type ndparams: dict
174 @param ndparams: node parameters to upgrade
175 @rtype: dict
176 @return: the upgraded node parameters dict
177
178 """
179 if ndparams is None:
180 ndparams = {}
181
182 if (constants.ND_OOB_PROGRAM in ndparams and
183 ndparams[constants.ND_OOB_PROGRAM] is None):
184 # will be reset by the line below
185 del ndparams[constants.ND_OOB_PROGRAM]
186 return FillDict(constants.NDC_DEFAULTS, ndparams)
187
188
189 def MakeEmptyIPolicy():
190 """Create empty IPolicy dictionary.
191
192 """
193 return {}
194
195
196 class ConfigObject(outils.ValidatedSlots):
197 """A generic config object.
198
199 It has the following properties:
200
201 - provides somewhat safe recursive unpickling and pickling for its classes
202 - unset attributes which are defined in slots are always returned
203 as None instead of raising an error
204
205 Classes derived from this must always declare __slots__ (we use many
206 config objects and the memory reduction is useful)
207
208 """
209 __slots__ = []
210
211 def __getattr__(self, name):
212 if name not in self.GetAllSlots():
213 raise AttributeError("Invalid object attribute %s.%s" %
214 (type(self).__name__, name))
215 return None
216
217 def __setstate__(self, state):
218 slots = self.GetAllSlots()
219 for name in state:
220 if name in slots:
221 setattr(self, name, state[name])
222
223 def Validate(self):
224 """Validates the slots.
225
226 This method returns L{None} if the validation succeeds, or raises
227 an exception otherwise.
228
229 This method must be implemented by the child classes.
230
231 @rtype: NoneType
232 @return: L{None}, if the validation succeeds
233
234 @raise Exception: validation fails
235
236 """
237
238 def ToDict(self, _with_private=False):
239 """Convert to a dict holding only standard python types.
240
241 The generic routine just dumps all of this object's attributes in
242 a dict. It does not work if the class has children who are
243 ConfigObjects themselves (e.g. the nics list in an Instance), in
244 which case the object should subclass the function in order to
245 make sure all objects returned are only standard python types.
246
247 Private fields can be included or not with the _with_private switch.
248 The actual implementation of this switch is left for those subclasses
249 with private fields to implement.
250
251 @type _with_private: bool
252 @param _with_private: if True, the object will leak its private fields in
253 the dictionary representation. If False, the values
254 will be replaced with None.
255
256 """
257 result = {}
258 for name in self.GetAllSlots():
259 value = getattr(self, name, None)
260 if value is not None:
261 result[name] = value
262 return result
263
264 __getstate__ = ToDict
265
266 @classmethod
267 def FromDict(cls, val):
268 """Create an object from a dictionary.
269
270 This generic routine takes a dict, instantiates a new instance of
271 the given class, and sets attributes based on the dict content.
272
273 As for `ToDict`, this does not work if the class has children
274 who are ConfigObjects themselves (e.g. the nics list in an
275 Instance), in which case the object should subclass the function
276 and alter the objects.
277
278 """
279 if not isinstance(val, dict):
280 raise errors.ConfigurationError("Invalid object passed to FromDict:"
281 " expected dict, got %s" % type(val))
282 val_str = dict([(str(k), v) for k, v in val.iteritems()])
283 obj = cls(**val_str)
284 return obj
285
286 def Copy(self):
287 """Makes a deep copy of the current object and its children.
288
289 """
290 dict_form = self.ToDict()
291 clone_obj = self.__class__.FromDict(dict_form)
292 return clone_obj
293
294 def __repr__(self):
295 """Implement __repr__ for ConfigObjects."""
296 return repr(self.ToDict())
297
298 def __eq__(self, other):
299 """Implement __eq__ for ConfigObjects."""
300 return isinstance(other, self.__class__) and self.ToDict() == other.ToDict()
301
302 def UpgradeConfig(self):
303 """Fill defaults for missing configuration values.
304
305 This method will be called at configuration load time, and its
306 implementation will be object dependent.
307
308 """
309 pass
310
311
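# Illustrative sketch (added for clarity, not part of the original module):
# a minimal ConfigObject subclass showing the ToDict/FromDict round trip and
# the "unset slots read as None" behaviour.  The _ExamplePair class is
# hypothetical and exists only for demonstration.
class _ExamplePair(ConfigObject):
  """Tiny config object with two slots."""
  __slots__ = ["left", "right"]


def _ExampleConfigObjectRoundTrip():
  """Serialise and re-create an _ExamplePair instance."""
  pair = _ExamplePair(left=1)
  assert pair.right is None     # unset slot attributes read as None
  data = pair.ToDict()          # only the attributes that are set are dumped
  assert data == {"left": 1}
  clone = _ExamplePair.FromDict(data)
  assert clone == pair          # __eq__ compares the dict forms
  return clone
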
312 class TaggableObject(ConfigObject):
313 """An generic class supporting tags.
314
315 """
316 __slots__ = ["tags"]
317 VALID_TAG_RE = re.compile(r"^[\w.+*/:@-]+$")
318
319 @classmethod
320 def ValidateTag(cls, tag):
321 """Check if a tag is valid.
322
323 If the tag is invalid, an errors.TagError will be raised. The
324 function has no return value.
325
326 """
327 if not isinstance(tag, basestring):
328 raise errors.TagError("Invalid tag type (not a string)")
329 if len(tag) > constants.MAX_TAG_LEN:
330 raise errors.TagError("Tag too long (>%d characters)" %
331 constants.MAX_TAG_LEN)
332 if not tag:
333 raise errors.TagError("Tags cannot be empty")
334 if not cls.VALID_TAG_RE.match(tag):
335 raise errors.TagError("Tag contains invalid characters")
336
337 def GetTags(self):
338 """Return the tags list.
339
340 """
341 tags = getattr(self, "tags", None)
342 if tags is None:
343 tags = self.tags = set()
344 return tags
345
346 def AddTag(self, tag):
347 """Add a new tag.
348
349 """
350 self.ValidateTag(tag)
351 tags = self.GetTags()
352 if len(tags) >= constants.MAX_TAGS_PER_OBJ:
353 raise errors.TagError("Too many tags")
354 self.GetTags().add(tag)
355
356 def RemoveTag(self, tag):
357 """Remove a tag.
358
359 """
360 self.ValidateTag(tag)
361 tags = self.GetTags()
362 try:
363 tags.remove(tag)
364 except KeyError:
365 raise errors.TagError("Tag not found")
366
367 def ToDict(self, _with_private=False):
368 """Taggable-object-specific conversion to standard python types.
369
370 This replaces the tags set with a list.
371
372 """
373 bo = super(TaggableObject, self).ToDict(_with_private=_with_private)
374
375 tags = bo.get("tags", None)
376 if isinstance(tags, set):
377 bo["tags"] = list(tags)
378 return bo
379
380 @classmethod
381 def FromDict(cls, val):
382 """Custom function for instances.
383
384 """
385 obj = super(TaggableObject, cls).FromDict(val)
386 if hasattr(obj, "tags") and isinstance(obj.tags, list):
387 obj.tags = set(obj.tags)
388 return obj
389
390
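# Illustrative sketch (added for clarity, not part of the original module):
# tag handling on a TaggableObject.  The _ExampleTagged class is
# hypothetical; concrete taggable objects (Instance, Node, ...) behave the
# same way.
class _ExampleTagged(TaggableObject):
  """Tiny taggable object for demonstration purposes."""
  __slots__ = []


def _ExampleTagUsage():
  """Add, serialise and remove a tag."""
  obj = _ExampleTagged()
  obj.AddTag("environment:test")   # validated against VALID_TAG_RE
  assert "environment:test" in obj.GetTags()
  # ToDict turns the internal tag set into a JSON-friendly list
  assert _ExampleTagged.FromDict(obj.ToDict()).GetTags() == obj.GetTags()
  obj.RemoveTag("environment:test")
  assert not obj.GetTags()
  return obj
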
391 class MasterNetworkParameters(ConfigObject):
392 """Network configuration parameters for the master
393
394 @ivar uuid: master node's UUID
395 @ivar ip: master IP
396 @ivar netmask: master netmask
397 @ivar netdev: master network device
398 @ivar ip_family: master IP family
399
400 """
401 __slots__ = [
402 "uuid",
403 "ip",
404 "netmask",
405 "netdev",
406 "ip_family",
407 ]
408
409
410 class ConfigData(ConfigObject):
411 """Top-level config object."""
412 __slots__ = [
413 "version",
414 "cluster",
415 "nodes",
416 "nodegroups",
417 "instances",
418 "networks",
419 "disks",
420 "filters",
421 "maintenance",
422 "serial_no",
423 ] + _TIMESTAMPS
424
425 def ToDict(self, _with_private=False):
426 """Custom function for top-level config data.
427
428 This just replaces the list of nodes, instances, nodegroups,
429 networks, disks and the cluster with standard python types.
430
431 """
432 mydict = super(ConfigData, self).ToDict(_with_private=_with_private)
433 mydict["cluster"] = mydict["cluster"].ToDict()
434 mydict["maintenance"] = mydict["maintenance"].ToDict()
435 for key in ("nodes", "instances", "nodegroups", "networks", "disks",
436 "filters"):
437 mydict[key] = outils.ContainerToDicts(mydict[key])
438
439 return mydict
440
441 @classmethod
442 def FromDict(cls, val):
443 """Custom function for top-level config data
444
445 """
446 obj = super(ConfigData, cls).FromDict(val)
447 obj.cluster = Cluster.FromDict(obj.cluster)
448 obj.nodes = outils.ContainerFromDicts(obj.nodes, dict, Node)
449 obj.instances = \
450 outils.ContainerFromDicts(obj.instances, dict, Instance)
451 obj.nodegroups = \
452 outils.ContainerFromDicts(obj.nodegroups, dict, NodeGroup)
453 obj.networks = outils.ContainerFromDicts(obj.networks, dict, Network)
454 obj.disks = outils.ContainerFromDicts(obj.disks, dict, Disk)
455 obj.filters = outils.ContainerFromDicts(obj.filters, dict, Filter)
456 obj.maintenance = Maintenance.FromDict(obj.maintenance)
457 return obj
458
459 def DisksOfType(self, dev_type):
460 """Check if in there is at disk of the given type in the configuration.
461
462 @type dev_type: L{constants.DTS_BLOCK}
463 @param dev_type: the type to look for
464 @rtype: list of disks
465 @return: all disks of the dev_type
466
467 """
468
469 return [disk for disk in self.disks.values()
470 if disk.IsBasedOnDiskType(dev_type)]
471
472 def UpgradeConfig(self):
473 """Fill defaults for missing configuration values.
474
475 """
476 self.cluster.UpgradeConfig()
477 for node in self.nodes.values():
478 node.UpgradeConfig()
479 for instance in self.instances.values():
480 instance.UpgradeConfig()
481 self._UpgradeEnabledDiskTemplates()
482 if self.nodegroups is None:
483 self.nodegroups = {}
484 for nodegroup in self.nodegroups.values():
485 nodegroup.UpgradeConfig()
486 InstancePolicy.UpgradeDiskTemplates(
487 nodegroup.ipolicy, self.cluster.enabled_disk_templates)
488 if self.cluster.drbd_usermode_helper is None:
489 if self.cluster.IsDiskTemplateEnabled(constants.DT_DRBD8):
490 self.cluster.drbd_usermode_helper = constants.DEFAULT_DRBD_HELPER
491 if self.networks is None:
492 self.networks = {}
493 for network in self.networks.values():
494 network.UpgradeConfig()
495 for disk in self.disks.values():
496 disk.UpgradeConfig()
497 if self.filters is None:
498 self.filters = {}
499 if self.maintenance is None:
500 self.maintenance = Maintenance.FromDict({})
501 self.maintenance.UpgradeConfig()
502
503 def _UpgradeEnabledDiskTemplates(self):
504 """Upgrade the cluster's enabled disk templates by inspecting the currently
505 enabled and/or used disk templates.
506
507 """
508 if not self.cluster.enabled_disk_templates:
509 template_set = \
510 set([d.dev_type for d in self.disks.values()])
511 if any(not inst.disks for inst in self.instances.values()):
512 template_set.add(constants.DT_DISKLESS)
513 # Add drbd and plain, if lvm is enabled (by specifying a volume group)
514 if self.cluster.volume_group_name:
515 template_set.add(constants.DT_DRBD8)
516 template_set.add(constants.DT_PLAIN)
517 # Set enabled_disk_templates to the inferred disk templates. Order them
518 # according to a preference list that is based on Ganeti's history of
519 # supported disk templates.
520 self.cluster.enabled_disk_templates = []
521 for preferred_template in constants.DISK_TEMPLATE_PREFERENCE:
522 if preferred_template in template_set:
523 self.cluster.enabled_disk_templates.append(preferred_template)
524 template_set.remove(preferred_template)
525 self.cluster.enabled_disk_templates.extend(list(template_set))
526 InstancePolicy.UpgradeDiskTemplates(
527 self.cluster.ipolicy, self.cluster.enabled_disk_templates)
528
529
530 class NIC(ConfigObject):
531 """Config object representing a network card."""
532 __slots__ = ["name", "mac", "ip", "network",
533 "nicparams", "netinfo", "pci", "hvinfo"] + _UUID
534
535 @classmethod
536 def CheckParameterSyntax(cls, nicparams):
537 """Check the given parameters for validity.
538
539 @type nicparams: dict
540 @param nicparams: dictionary with parameter names/value
541 @raise errors.ConfigurationError: when a parameter is not valid
542
543 """
544 mode = nicparams[constants.NIC_MODE]
545 if (mode not in constants.NIC_VALID_MODES and
546 mode != constants.VALUE_AUTO):
547 raise errors.ConfigurationError("Invalid NIC mode '%s'" % mode)
548
549 if (mode == constants.NIC_MODE_BRIDGED and
550 not nicparams[constants.NIC_LINK]):
551 raise errors.ConfigurationError("Missing bridged NIC link")
552
553
554 class Filter(ConfigObject):
555 """Config object representing a filter rule."""
556 __slots__ = ["watermark", "priority",
557 "predicates", "action", "reason_trail"] + _UUID
558
559
560 class Maintenance(ConfigObject):
561 """Config object representing the state of the maintenance daemon"""
562 __slots__ = ["roundDelay", "jobs", "evacuated", "balance", "balanceThreshold",
563 "incidents", "serial_no"] + _TIMESTAMPS
564
565 def UpgradeConfig(self):
566 if self.serial_no is None:
567 self.serial_no = 1
568 if self.mtime is None:
569 self.mtime = time.time()
570 if self.ctime is None:
571 self.ctime = time.time()
572
573
574 class Disk(ConfigObject):
575 """Config object representing a block device."""
576 __slots__ = [
577 "forthcoming",
578 "name",
579 "dev_type",
580 "logical_id",
581 "children",
582 "nodes",
583 "iv_name",
584 "size",
585 "mode",
586 "params",
587 "spindles",
588 "pci",
589 "hvinfo",
590 "serial_no",
591 # dynamic_params is special. It depends on the node this instance
592 # is sent to, and should not be persisted.
593 "dynamic_params"
594 ] + _UUID + _TIMESTAMPS
595
596 def _ComputeAllNodes(self):
597 """Compute the list of all nodes covered by a device and its children."""
598 def _Helper(nodes, device):
599 """Recursively compute nodes given a top device."""
600 if device.dev_type in constants.DTS_DRBD:
601 nodes.extend(device.logical_id[:2])
602 if device.children:
603 for child in device.children:
604 _Helper(nodes, child)
605
606 all_nodes = list()
607 _Helper(all_nodes, self)
608 return tuple(set(all_nodes))
609
610 all_nodes = property(_ComputeAllNodes, None, None,
611 "List of names of all the nodes of a disk")
612
613 def CreateOnSecondary(self):
614 """Test if this device needs to be created on a secondary node."""
615 return self.dev_type in (constants.DT_DRBD8, constants.DT_PLAIN)
616
617 def AssembleOnSecondary(self):
618 """Test if this device needs to be assembled on a secondary node."""
619 return self.dev_type in (constants.DT_DRBD8, constants.DT_PLAIN)
620
621 def OpenOnSecondary(self):
622 """Test if this device needs to be opened on a secondary node."""
623 return self.dev_type in (constants.DT_PLAIN,)
624
625 def SupportsSnapshots(self):
626 """Test if this device supports snapshots."""
627 return self.dev_type in constants.DTS_SNAPSHOT_CAPABLE
628
629 def StaticDevPath(self):
630 """Return the device path if this device type has a static one.
631
632 Some devices (LVM for example) live always at the same /dev/ path,
633 irrespective of their status. For such devices, we return this
634 path, for others we return None.
635
636 @warning: The path returned is not a normalized pathname; callers
637 should check that it is a valid path.
638
639 """
640 if self.dev_type == constants.DT_PLAIN:
641 return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
642 elif self.dev_type == constants.DT_BLOCK:
643 return self.logical_id[1]
644 elif self.dev_type == constants.DT_RBD:
645 return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
646 return None
647
648 def ChildrenNeeded(self):
649 """Compute the needed number of children for activation.
650
651 This method will return either -1 (all children) or a positive
652 number denoting the minimum number of children needed for
653 activation (only mirrored devices will usually return >=0).
654
655 Currently, only DRBD8 supports diskless activation (therefore we
656 return 0), for all other we keep the previous semantics and return
657 -1.
658
659 """
660 if self.dev_type == constants.DT_DRBD8:
661 return 0
662 return -1
663
664 def IsBasedOnDiskType(self, dev_type):
665 """Check if the disk or its children are based on the given type.
666
667 @type dev_type: L{constants.DTS_BLOCK}
668 @param dev_type: the type to look for
669 @rtype: boolean
670 @return: boolean indicating if a device of the given type was found or not
671
672 """
673 if self.children:
674 for child in self.children:
675 if child.IsBasedOnDiskType(dev_type):
676 return True
677 return self.dev_type == dev_type
678
679 def GetNodes(self, node_uuid):
680 """This function returns the nodes this device lives on.
681
682 Given the node on which the parent of the device lives on (or, in
683 case of a top-level device, the primary node of the device's
684 instance), this function will return a list of nodes on which this
685 device needs to (or can) be assembled.
686
687 """
688 if self.dev_type in [constants.DT_PLAIN, constants.DT_FILE,
689 constants.DT_BLOCK, constants.DT_RBD,
690 constants.DT_EXT, constants.DT_SHARED_FILE,
691 constants.DT_GLUSTER]:
692 result = [node_uuid]
693 elif self.dev_type in constants.DTS_DRBD:
694 result = [self.logical_id[0], self.logical_id[1]]
695 if node_uuid not in result:
696 raise errors.ConfigurationError("DRBD device passed unknown node")
697 else:
698 raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
699 return result
700
701 def GetPrimaryNode(self, node_uuid):
702 """This function returns the primary node of the device.
703
704 If the device is not a DRBD device, we still return the node the device
705 lives on.
706
707 """
708 if self.dev_type in constants.DTS_DRBD:
709 return self.logical_id[0]
710 return node_uuid
711
712 def ComputeNodeTree(self, parent_node_uuid):
713 """Compute the node/disk tree for this disk and its children.
714
715 This method, given the node on which the parent disk lives, will
716 return the list of all (node UUID, disk) pairs which describe the disk
717 tree in the most compact way. For example, a drbd/lvm stack
718 will be returned as (primary_node, drbd) and (secondary_node, drbd)
719 which represents all the top-level devices on the nodes.
720
721 """
722 my_nodes = self.GetNodes(parent_node_uuid)
723 result = [(node, self) for node in my_nodes]
724 if not self.children:
725 # leaf device
726 return result
727 for node in my_nodes:
728 for child in self.children:
729 child_result = child.ComputeNodeTree(node)
730 if len(child_result) == 1:
731 # child (and all its descendants) is simple, doesn't split
732 # over multiple hosts, so we don't need to describe it, our
733 # own entry for this node describes it completely
734 continue
735 else:
736 # check if child nodes differ from my nodes; note that
737 # subdisk can differ from the child itself, and be instead
738 # one of its descendants
739 for subnode, subdisk in child_result:
740 if subnode not in my_nodes:
741 result.append((subnode, subdisk))
742 # otherwise child is under our own node, so we ignore this
743 # entry (but probably the other results in the list will
744 # be different)
745 return result
746
747 def ComputeGrowth(self, amount):
748 """Compute the per-VG growth requirements.
749
750 This only works for VG-based disks.
751
752 @type amount: integer
753 @param amount: the desired increase in (user-visible) disk space
754 @rtype: dict
755 @return: a dictionary of volume-groups and the required size
756
757 """
758 if self.dev_type == constants.DT_PLAIN:
759 return {self.logical_id[0]: amount}
760 elif self.dev_type == constants.DT_DRBD8:
761 if self.children:
762 return self.children[0].ComputeGrowth(amount)
763 else:
764 return {}
765 else:
766 # Other disk types do not require VG space
767 return {}
768
769 def RecordGrow(self, amount):
770 """Update the size of this disk after growth.
771
772 This method recurses over the disk's children and updates their
773 size correspondingly. The method needs to be kept in sync with the
774 actual algorithms from bdev.
775
776 """
777 if self.dev_type in (constants.DT_PLAIN, constants.DT_FILE,
778 constants.DT_RBD, constants.DT_EXT,
779 constants.DT_SHARED_FILE, constants.DT_GLUSTER):
780 self.size += amount
781 elif self.dev_type == constants.DT_DRBD8:
782 if self.children:
783 self.children[0].RecordGrow(amount)
784 self.size += amount
785 else:
786 raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
787 " disk type %s" % self.dev_type)
788
789 def Update(self, size=None, mode=None, spindles=None):
790 """Apply changes to size, spindles and mode.
791
792 """
793 if self.dev_type == constants.DT_DRBD8:
794 if self.children:
795 self.children[0].Update(size=size, mode=mode)
796 else:
797 assert not self.children
798
799 if size is not None:
800 self.size = size
801 if mode is not None:
802 self.mode = mode
803 if spindles is not None:
804 self.spindles = spindles
805
806 def UnsetSize(self):
807 """Sets recursively the size to zero for the disk and its children.
808
809 """
810 if self.children:
811 for child in self.children:
812 child.UnsetSize()
813 self.size = 0
814
815 def UpdateDynamicDiskParams(self, target_node_uuid, nodes_ip):
816 """Updates the dynamic disk params for the given node.
817
818 This is mainly used for drbd, which needs ip/port configuration.
819
820 Arguments:
821 - target_node_uuid: the node UUID we wish to configure for
822 - nodes_ip: a mapping of node UUID to IP
823
824 The target_node_uuid must exist in nodes_ip, and should be one of the
825 nodes in the logical ID if this device is a DRBD device.
826
827 """
828 if self.children:
829 for child in self.children:
830 child.UpdateDynamicDiskParams(target_node_uuid, nodes_ip)
831
832 dyn_disk_params = {}
833 if self.logical_id is not None and self.dev_type in constants.DTS_DRBD:
834 pnode_uuid, snode_uuid, _, pminor, sminor, _ = self.logical_id
835 if target_node_uuid not in (pnode_uuid, snode_uuid):
836 # disk object is being sent to neither the primary nor the secondary
837 # node. reset the dynamic parameters, the target node is not
838 # supposed to use them.
839 self.dynamic_params = dyn_disk_params
840 return
841
842 pnode_ip = nodes_ip.get(pnode_uuid, None)
843 snode_ip = nodes_ip.get(snode_uuid, None)
844 if pnode_ip is None or snode_ip is None:
845 raise errors.ConfigurationError("Can't find primary or secondary node"
846 " for %s" % str(self))
847 if pnode_uuid == target_node_uuid:
848 dyn_disk_params[constants.DDP_LOCAL_IP] = pnode_ip
849 dyn_disk_params[constants.DDP_REMOTE_IP] = snode_ip
850 dyn_disk_params[constants.DDP_LOCAL_MINOR] = pminor
851 dyn_disk_params[constants.DDP_REMOTE_MINOR] = sminor
852 else: # it must be secondary, we tested above
853 dyn_disk_params[constants.DDP_LOCAL_IP] = snode_ip
854 dyn_disk_params[constants.DDP_REMOTE_IP] = pnode_ip
855 dyn_disk_params[constants.DDP_LOCAL_MINOR] = sminor
856 dyn_disk_params[constants.DDP_REMOTE_MINOR] = pminor
857
858 self.dynamic_params = dyn_disk_params
859
860 # pylint: disable=W0221
861 def ToDict(self, include_dynamic_params=False,
862 _with_private=False):
863 """Disk-specific conversion to standard python types.
864
865 This replaces the children lists of objects with lists of
866 standard python types.
867
868 """
869 bo = super(Disk, self).ToDict(_with_private=_with_private)
870 if not include_dynamic_params and "dynamic_params" in bo:
871 del bo["dynamic_params"]
872
873 if _with_private and "logical_id" in bo:
874 mutable_id = list(bo["logical_id"])
875 mutable_id[5] = mutable_id[5].Get()
876 bo["logical_id"] = tuple(mutable_id)
877
878 for attr in ("children",):
879 alist = bo.get(attr, None)
880 if alist:
881 bo[attr] = outils.ContainerToDicts(alist)
882 return bo
883
884 @classmethod
885 def FromDict(cls, val):
886 """Custom function for Disks
887
888 """
889 obj = super(Disk, cls).FromDict(val)
890 if obj.children:
891 obj.children = outils.ContainerFromDicts(obj.children, list, Disk)
892 if obj.logical_id and isinstance(obj.logical_id, list):
893 obj.logical_id = tuple(obj.logical_id)
894 if obj.dev_type in constants.DTS_DRBD:
895 # we need a tuple of length six here
896 if len(obj.logical_id) < 6:
897 obj.logical_id += (None,) * (6 - len(obj.logical_id))
898 # If we do have a tuple of length 6, make the last entry (secret key)
899 # private
900 elif (len(obj.logical_id) == 6 and
901 not isinstance(obj.logical_id[-1], serializer.Private)):
902 obj.logical_id = obj.logical_id[:-1] + \
903 (serializer.Private(obj.logical_id[-1]),)
904 return obj
905
906 def __str__(self):
907 """Custom str() formatter for disks.
908
909 """
910 if self.dev_type == constants.DT_PLAIN:
911 val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
912 elif self.dev_type in constants.DTS_DRBD:
913 node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
914 val = "<DRBD8("
915
916 val += ("hosts=%s/%d-%s/%d, port=%s, " %
917 (node_a, minor_a, node_b, minor_b, port))
918 if self.children and self.children.count(None) == 0:
919 val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
920 else:
921 val += "no local storage"
922 else:
923 val = ("<Disk(type=%s, logical_id=%s, children=%s" %
924 (self.dev_type, self.logical_id, self.children))
925 if self.iv_name is None:
926 val += ", not visible"
927 else:
928 val += ", visible as /dev/%s" % self.iv_name
929 if self.spindles is not None:
930 val += ", spindles=%s" % self.spindles
931 if isinstance(self.size, int):
932 val += ", size=%dm)>" % self.size
933 else:
934 val += ", size='%s')>" % (self.size,)
935 return val
936
937 def Verify(self):
938 """Checks that this disk is correctly configured.
939
940 """
941 all_errors = []
942 if self.mode not in constants.DISK_ACCESS_SET:
943 all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
944 return all_errors
945
946 def UpgradeConfig(self):
947 """Fill defaults for missing configuration values.
948
949 """
950 if self.children:
951 for child in self.children:
952 child.UpgradeConfig()
953
954 # FIXME: Make this configurable in Ganeti 2.7
955 # Params should be an empty dict that gets filled any time needed
956 # In case of ext template we allow arbitrary params that should not
957 # be overridden during a config reload/upgrade.
958 if not self.params or not isinstance(self.params, dict):
959 self.params = {}
960
961 # add here config upgrade for this disk
962 if self.serial_no is None:
963 self.serial_no = 1
964 if self.mtime is None:
965 self.mtime = time.time()
966 if self.ctime is None:
967 self.ctime = time.time()
968
969 # map of legacy device types (mapping differing LD constants to new
970 # DT constants)
971 LEG_DEV_TYPE_MAP = {"lvm": constants.DT_PLAIN, "drbd8": constants.DT_DRBD8}
972 if self.dev_type in LEG_DEV_TYPE_MAP:
973 self.dev_type = LEG_DEV_TYPE_MAP[self.dev_type]
974
975 @staticmethod
976 def ComputeLDParams(disk_template, disk_params):
977 """Computes Logical Disk parameters from Disk Template parameters.
978
979 @type disk_template: string
980 @param disk_template: disk template, one of L{constants.DISK_TEMPLATES}
981 @type disk_params: dict
982 @param disk_params: disk template parameters;
983 dict(template_name -> parameters)
984 @rtype: list(dict)
985 @return: a list of dicts, one for each node of the disk hierarchy. Each dict
986 contains the LD parameters of the node. The tree is flattened in-order.
987
988 """
989 if disk_template not in constants.DISK_TEMPLATES:
990 raise errors.ProgrammerError("Unknown disk template %s" % disk_template)
991
992 assert disk_template in disk_params
993
994 result = list()
995 dt_params = disk_params[disk_template]
996
997 if disk_template == constants.DT_DRBD8:
998 result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.DT_DRBD8], {
999 constants.LDP_RESYNC_RATE: dt_params[constants.DRBD_RESYNC_RATE],
1000 constants.LDP_BARRIERS: dt_params[constants.DRBD_DISK_BARRIERS],
1001 constants.LDP_NO_META_FLUSH: dt_params[constants.DRBD_META_BARRIERS],
1002 constants.LDP_DEFAULT_METAVG: dt_params[constants.DRBD_DEFAULT_METAVG],
1003 constants.LDP_DISK_CUSTOM: dt_params[constants.DRBD_DISK_CUSTOM],
1004 constants.LDP_NET_CUSTOM: dt_params[constants.DRBD_NET_CUSTOM],
1005 constants.LDP_PROTOCOL: dt_params[constants.DRBD_PROTOCOL],
1006 constants.LDP_DYNAMIC_RESYNC: dt_params[constants.DRBD_DYNAMIC_RESYNC],
1007 constants.LDP_PLAN_AHEAD: dt_params[constants.DRBD_PLAN_AHEAD],
1008 constants.LDP_FILL_TARGET: dt_params[constants.DRBD_FILL_TARGET],
1009 constants.LDP_DELAY_TARGET: dt_params[constants.DRBD_DELAY_TARGET],
1010 constants.LDP_MAX_RATE: dt_params[constants.DRBD_MAX_RATE],
1011 constants.LDP_MIN_RATE: dt_params[constants.DRBD_MIN_RATE],
1012 }))
1013
1014 # data LV
1015 result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.DT_PLAIN], {
1016 constants.LDP_STRIPES: dt_params[constants.DRBD_DATA_STRIPES],
1017 }))
1018
1019 # metadata LV
1020 result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.DT_PLAIN], {
1021 constants.LDP_STRIPES: dt_params[constants.DRBD_META_STRIPES],
1022 }))
1023
1024 else:
1025 defaults = constants.DISK_LD_DEFAULTS[disk_template]
1026 values = {}
1027 for field in defaults:
1028 values[field] = dt_params[field]
1029 result.append(FillDict(defaults, values))
1030
1031 return result
1032
1033
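# Illustrative sketch (added for clarity, not part of the original module):
# a plain (LVM-backed) disk and the size bookkeeping helpers defined above.
# The volume group and LV names are hypothetical.
def _ExampleDiskGrowth():
  """Demonstrate ComputeGrowth/RecordGrow on a DT_PLAIN disk."""
  disk = Disk(dev_type=constants.DT_PLAIN, size=1024,
              logical_id=("xenvg", "example-lv"))
  # growth of a plain disk is charged entirely to its volume group
  assert disk.ComputeGrowth(512) == {"xenvg": 512}
  disk.RecordGrow(512)
  assert disk.size == 1536
  assert disk.StaticDevPath() == "/dev/xenvg/example-lv"
  return disk
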
1034 class InstancePolicy(ConfigObject):
1035 """Config object representing instance policy limits dictionary.
1036
1037 Note that this object is not actually used in the config, it's just
1038 used as a placeholder for a few functions.
1039
1040 """
1041 @classmethod
1042 def UpgradeDiskTemplates(cls, ipolicy, enabled_disk_templates):
1043 """Upgrades the ipolicy configuration."""
1044 if constants.IPOLICY_DTS in ipolicy:
1045 if not set(ipolicy[constants.IPOLICY_DTS]).issubset(
1046 set(enabled_disk_templates)):
1047 ipolicy[constants.IPOLICY_DTS] = list(
1048 set(ipolicy[constants.IPOLICY_DTS]) & set(enabled_disk_templates))
1049
1050 @classmethod
1051 def CheckParameterSyntax(cls, ipolicy, check_std):
1052 """ Check the instance policy for validity.
1053
1054 @type ipolicy: dict
1055 @param ipolicy: dictionary with min/max/std specs and policies
1056 @type check_std: bool
1057 @param check_std: Whether to check std value or just assume compliance
1058 @raise errors.ConfigurationError: when the policy is not legal
1059
1060 """
1061 InstancePolicy.CheckISpecSyntax(ipolicy, check_std)
1062 if constants.IPOLICY_DTS in ipolicy:
1063 InstancePolicy.CheckDiskTemplates(ipolicy[constants.IPOLICY_DTS])
1064 for key in constants.IPOLICY_PARAMETERS:
1065 if key in ipolicy:
1066 InstancePolicy.CheckParameter(key, ipolicy[key])
1067 wrong_keys = frozenset(ipolicy.keys()) - constants.IPOLICY_ALL_KEYS
1068 if wrong_keys:
1069 raise errors.ConfigurationError("Invalid keys in ipolicy: %s" %
1070 utils.CommaJoin(wrong_keys))
1071
1072 @classmethod
1073 def _CheckIncompleteSpec(cls, spec, keyname):
1074 missing_params = constants.ISPECS_PARAMETERS - frozenset(spec.keys())
1075 if missing_params:
1076 msg = ("Missing instance specs parameters for %s: %s" %
1077 (keyname, utils.CommaJoin(missing_params)))
1078 raise errors.ConfigurationError(msg)
1079
1080 @classmethod
1081 def CheckISpecSyntax(cls, ipolicy, check_std):
1082 """Check the instance policy specs for validity.
1083
1084 @type ipolicy: dict
1085 @param ipolicy: dictionary with min/max/std specs
1086 @type check_std: bool
1087 @param check_std: Whether to check std value or just assume compliance
1088 @raise errors.ConfigurationError: when specs are not valid
1089
1090 """
1091 if constants.ISPECS_MINMAX not in ipolicy:
1092 # Nothing to check
1093 return
1094
1095 if check_std and constants.ISPECS_STD not in ipolicy:
1096 msg = "Missing key in ipolicy: %s" % constants.ISPECS_STD
1097 raise errors.ConfigurationError(msg)
1098 stdspec = ipolicy.get(constants.ISPECS_STD)
1099 if check_std:
1100 InstancePolicy._CheckIncompleteSpec(stdspec, constants.ISPECS_STD)
1101
1102 if not ipolicy[constants.ISPECS_MINMAX]:
1103 raise errors.ConfigurationError("Empty minmax specifications")
1104 std_is_good = False
1105 for minmaxspecs in ipolicy[constants.ISPECS_MINMAX]:
1106 missing = constants.ISPECS_MINMAX_KEYS - frozenset(minmaxspecs.keys())
1107 if missing:
1108 msg = "Missing instance specification: %s" % utils.CommaJoin(missing)
1109 raise errors.ConfigurationError(msg)
1110 for (key, spec) in minmaxspecs.items():
1111 InstancePolicy._CheckIncompleteSpec(spec, key)
1112
1113 spec_std_ok = True
1114 for param in constants.ISPECS_PARAMETERS:
1115 par_std_ok = InstancePolicy._CheckISpecParamSyntax(minmaxspecs, stdspec,
1116 param, check_std)
1117 spec_std_ok = spec_std_ok and par_std_ok
1118 std_is_good = std_is_good or spec_std_ok
1119 if not std_is_good:
1120 raise errors.ConfigurationError("Invalid std specifications")
1121
1122 @classmethod
1123 def _CheckISpecParamSyntax(cls, minmaxspecs, stdspec, name, check_std):
1124 """Check the instance policy specs for validity on a given key.
1125
1126 We check if the instance specs make sense for a given key, that is
1127 if minmaxspecs[min][name] <= stdspec[name] <= minmaxspecs[max][name].
1128
1129 @type minmaxspecs: dict
1130 @param minmaxspecs: dictionary with min and max instance spec
1131 @type stdspec: dict
1132 @param stdspec: dictionary with standard instance spec
1133 @type name: string
1134 @param name: what are the limits for
1135 @type check_std: bool
1136 @param check_std: Whether to check std value or just assume compliance
1137 @rtype: bool
1138 @return: C{True} when specs are valid, C{False} when standard spec for the
1139 given name is not valid
1140 @raise errors.ConfigurationError: when min/max specs for the given name
1141 are not valid
1142
1143 """
1144 minspec = minmaxspecs[constants.ISPECS_MIN]
1145 maxspec = minmaxspecs[constants.ISPECS_MAX]
1146 min_v = minspec[name]
1147 max_v = maxspec[name]
1148
1149 if min_v > max_v:
1150 err = ("Invalid specification of min/max values for %s: %s/%s" %
1151 (name, min_v, max_v))
1152 raise errors.ConfigurationError(err)
1153 elif check_std:
1154 std_v = stdspec.get(name, min_v)
1155 return std_v >= min_v and std_v <= max_v
1156 else:
1157 return True
1158
1159 @classmethod
1160 def CheckDiskTemplates(cls, disk_templates):
1161 """Checks the disk templates for validity.
1162
1163 """
1164 if not disk_templates:
1165 raise errors.ConfigurationError("Instance policy must contain" +
1166 " at least one disk template")
1167 wrong = frozenset(disk_templates).difference(constants.DISK_TEMPLATES)
1168 if wrong:
1169 raise errors.ConfigurationError("Invalid disk template(s) %s" %
1170 utils.CommaJoin(wrong))
1171
1172 @classmethod
1173 def CheckParameter(cls, key, value):
1174 """Checks a parameter.
1175
1176 Currently we expect all parameters to be float values.
1177
1178 """
1179 try:
1180 float(value)
1181 except (TypeError, ValueError), err:
1182 raise errors.ConfigurationError("Invalid value for key" " '%s':"
1183 " '%s', error: %s" % (key, value, err))
1184
1185
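# Illustrative sketch (added for clarity, not part of the original module):
# instance policy parameter values must be convertible to float; anything
# else is rejected with a ConfigurationError.  The key name used here is
# only an example.
def _ExampleCheckParameter():
  """Demonstrate the float check done by CheckParameter."""
  InstancePolicy.CheckParameter("spindle-ratio", "2.5")   # accepted
  try:
    InstancePolicy.CheckParameter("spindle-ratio", "not-a-number")
  except errors.ConfigurationError:
    return True
  return False
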
1186 def GetOSImage(osparams):
1187 """Gets the OS image value from the OS parameters.
1188
1189 @type osparams: L{dict} or NoneType
1190 @param osparams: OS parameters or None
1191
1192 @rtype: string or NoneType
1193 @return:
1194 value of OS image contained in OS parameters, or None if the OS
1195 parameters are None or the OS parameters do not contain an OS
1196 image
1197
1198 """
1199 if osparams is None:
1200 return None
1201 else:
1202 return osparams.get("os-image", None)
1203
1204
1205 def PutOSImage(osparams, os_image):
1206 """Update OS image value in the OS parameters
1207
1208 @type osparams: L{dict}
1209 @param osparams: OS parameters
1210
1211 @type os_image: string
1212 @param os_image: OS image
1213
1214 @rtype: NoneType
1215 @return: None
1216
1217 """
1218 osparams["os-image"] = os_image
1219
1220
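# Illustrative sketch (added for clarity, not part of the original module):
# the "os-image" value lives inside the OS parameters dict and is read and
# written by the two helpers above.  The image URI is hypothetical.
def _ExampleOSImageHelpers():
  """Demonstrate storing and reading back an OS image value."""
  assert GetOSImage(None) is None
  osparams = {}
  assert GetOSImage(osparams) is None
  PutOSImage(osparams, "http://example.com/disk.img")
  assert GetOSImage(osparams) == "http://example.com/disk.img"
  return osparams
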
1221 class Instance(TaggableObject):
1222 """Config object representing an instance."""
1223 __slots__ = [
1224 "forthcoming",
1225 "name",
1226 "primary_node",
1227 "secondary_nodes",
1228 "os",
1229 "hypervisor",
1230 "hvparams",
1231 "beparams",
1232 "osparams",
1233 "osparams_private",
1234 "admin_state",
1235 "admin_state_source",
1236 "nics",
1237 "disks",
1238 "disks_info",
1239 "disk_template",
1240 "disks_active",
1241 "network_port",
1242 "serial_no",
1243 ] + _TIMESTAMPS + _UUID
1244
1245 def FindDisk(self, idx):
1246 """Find a disk given having a specified index.
1247
1248 This is just a wrapper that does validation of the index.
1249
1250 @type idx: int
1251 @param idx: the disk index
1252 @rtype: string
1253 @return: the corresponding disk's uuid
1254 @raise errors.OpPrereqError: when the given index is not valid
1255
1256 """
1257 try:
1258 idx = int(idx)
1259 return self.disks[idx]
1260 except (TypeError, ValueError), err:
1261 raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err),
1262 errors.ECODE_INVAL)
1263 except IndexError:
1264 raise errors.OpPrereqError("Invalid disk index: %d (instace has disks"
1265 " 0 to %d" % (idx, len(self.disks) - 1),
1266 errors.ECODE_INVAL)
1267
1268 def ToDict(self, _with_private=False):
1269 """Instance-specific conversion to standard python types.
1270
1271 This replaces the children lists of objects with lists of standard
1272 python types.
1273
1274 """
1275 bo = super(Instance, self).ToDict(_with_private=_with_private)
1276
1277 if _with_private:
1278 bo["osparams_private"] = self.osparams_private.Unprivate()
1279
1280 for attr in ("nics",):
1281 alist = bo.get(attr, None)
1282 if alist:
1283 nlist = outils.ContainerToDicts(alist)
1284 else:
1285 nlist = []
1286 bo[attr] = nlist
1287
1288 if "disk_template" in bo:
1289 del bo["disk_template"]
1290
1291 return bo
1292
1293 @classmethod
1294 def FromDict(cls, val):
1295 """Custom function for instances.
1296
1297 """
1298 if "admin_state" not in val:
1299 if val.get("admin_up", False):
1300 val["admin_state"] = constants.ADMINST_UP
1301 else:
1302 val["admin_state"] = constants.ADMINST_DOWN
1303 if "admin_up" in val:
1304 del val["admin_up"]
1305 obj = super(Instance, cls).FromDict(val)
1306 obj.nics = outils.ContainerFromDicts(obj.nics, list, NIC)
1307
1308 # attribute 'disks_info' is only present when deserializing from a RPC
1309 # call in the backend
1310 disks_info = getattr(obj, "disks_info", None)
1311 if disks_info:
1312 obj.disks_info = outils.ContainerFromDicts(disks_info, list, Disk)
1313
1314 return obj
1315
1316 def UpgradeConfig(self):
1317 """Fill defaults for missing configuration values.
1318
1319 """
1320 if self.admin_state_source is None:
1321 self.admin_state_source = constants.ADMIN_SOURCE
1322 for nic in self.nics:
1323 nic.UpgradeConfig()
1324 if self.disks is None:
1325 self.disks = []
1326 if self.hvparams:
1327 for key in constants.HVC_GLOBALS:
1328 try:
1329 del self.hvparams[key]
1330 except KeyError:
1331 pass
1332 if self.osparams is None:
1333 self.osparams = {}
1334 if self.osparams_private is None:
1335 self.osparams_private = serializer.PrivateDict()
1336 UpgradeBeParams(self.beparams)
1337 if self.disks_active is None:
1338 self.disks_active = self.admin_state == constants.ADMINST_UP
1339
1340
1341 class OS(ConfigObject):
1342 """Config object representing an operating system.
1343
1344 @type supported_parameters: list
1345 @ivar supported_parameters: a list of tuples, name and description,
1346 containing the supported parameters by this OS
1347
1348 @type VARIANT_DELIM: string
1349 @cvar VARIANT_DELIM: the variant delimiter
1350
1351 """
1352 __slots__ = [
1353 "name",
1354 "path",
1355 "api_versions",
1356 "create_script",
1357 "create_script_untrusted",
1358 "export_script",
1359 "import_script",
1360 "rename_script",
1361 "verify_script",
1362 "supported_variants",
1363 "supported_parameters",
1364 ]
1365
1366 VARIANT_DELIM = "+"
1367
1368 @classmethod
1369 def SplitNameVariant(cls, name):
1370 """Splits the name into the proper name and variant.
1371
1372 @param name: the OS (unprocessed) name
1373 @rtype: list
1374 @return: a list of two elements; if the original name didn't
1375 contain a variant, it's returned as an empty string
1376
1377 """
1378 nv = name.split(cls.VARIANT_DELIM, 1)
1379 if len(nv) == 1:
1380 nv.append("")
1381 return nv
1382
1383 @classmethod
1384 def GetName(cls, name):
1385 """Returns the proper name of the os (without the variant).
1386
1387 @param name: the OS (unprocessed) name
1388
1389 """
1390 return cls.SplitNameVariant(name)[0]
1391
1392 @classmethod
1393 def GetVariant(cls, name):
1394 """Returns the variant the os (without the base name).
1395
1396 @param name: the OS (unprocessed) name
1397
1398 """
1399 return cls.SplitNameVariant(name)[1]
1400
1401 def IsTrusted(self):
1402 """Returns whether this OS is trusted.
1403
1404 @rtype: bool
1405 @return: L{True} if this OS is trusted, L{False} otherwise
1406
1407 """
1408 return not self.create_script_untrusted
1409
1410
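# Illustrative sketch (added for clarity, not part of the original module):
# splitting an OS name into base name and variant around the "+" delimiter.
# The OS names are hypothetical.
def _ExampleOSNameSplitting():
  """Demonstrate SplitNameVariant/GetName/GetVariant."""
  assert OS.SplitNameVariant("debootstrap+testing") == \
      ["debootstrap", "testing"]
  assert OS.GetName("debootstrap+testing") == "debootstrap"
  assert OS.GetVariant("debootstrap") == ""   # no variant present
  return OS.SplitNameVariant("debootstrap")
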
1411 class ExtStorage(ConfigObject):
1412 """Config object representing an External Storage Provider.
1413
1414 """
1415 __slots__ = [
1416 "name",
1417 "path",
1418 "create_script",
1419 "remove_script",
1420 "grow_script",
1421 "attach_script",
1422 "detach_script",
1423 "setinfo_script",
1424 "verify_script",
1425 "snapshot_script",
1426 "open_script",
1427 "close_script",
1428 "supported_parameters",
1429 ]
1430
1431
1432 class NodeHvState(ConfigObject):
1433 """Hypvervisor state on a node.
1434
1435 @ivar mem_total: Total amount of memory
1436 @ivar mem_node: Memory used by, or reserved for, the node itself (not always
1437 available)
1438 @ivar mem_hv: Memory used by hypervisor or lost due to instance allocation
1439 rounding
1440 @ivar mem_inst: Memory used by instances living on node
1441 @ivar cpu_total: Total node CPU core count
1442 @ivar cpu_node: Number of CPU cores reserved for the node itself
1443
1444 """
1445 __slots__ = [
1446 "mem_total",
1447 "mem_node",
1448 "mem_hv",
1449 "mem_inst",
1450 "cpu_total",
1451 "cpu_node",
1452 ] + _TIMESTAMPS
1453
1454
1455 class NodeDiskState(ConfigObject):
1456 """Disk state on a node.
1457
1458 """
1459 __slots__ = [
1460 "total",
1461 "reserved",
1462 "overhead",
1463 ] + _TIMESTAMPS
1464
1465
1466 class Node(TaggableObject):
1467 """Config object representing a node.
1468
1469 @ivar hv_state: Hypervisor state (e.g. number of CPUs)
1470 @ivar hv_state_static: Hypervisor state overridden by user
1471 @ivar disk_state: Disk state (e.g. free space)
1472 @ivar disk_state_static: Disk state overridden by user
1473
1474 """
1475 __slots__ = [
1476 "name",
1477 "primary_ip",
1478 "secondary_ip",
1479 "serial_no",
1480 "master_candidate",
1481 "offline",
1482 "drained",
1483 "group",
1484 "master_capable",
1485 "vm_capable",
1486 "ndparams",
1487 "powered",
1488 "hv_state",
1489 "hv_state_static",
1490 "disk_state",
1491 "disk_state_static",
1492 ] + _TIMESTAMPS + _UUID
1493
1494 def UpgradeConfig(self):
1495 """Fill defaults for missing configuration values.
1496
1497 """
1498 # pylint: disable=E0203
1499 # because these are "defined" via slots, not manually
1500 if self.master_capable is None:
1501 self.master_capable = True
1502
1503 if self.vm_capable is None:
1504 self.vm_capable = True
1505
1506 if self.ndparams is None:
1507 self.ndparams = {}
1508 # And remove any global parameter
1509 for key in constants.NDC_GLOBALS:
1510 if key in self.ndparams:
1511 logging.warning("Ignoring %s node parameter for node %s",
1512 key, self.name)
1513 del self.ndparams[key]
1514
1515 if self.powered is None:
1516 self.powered = True
1517
1518 if self.hv_state_static is None:
1519 self.hv_state_static = {}
1520 if self.disk_state_static is None:
1521 self.disk_state_static = {}
1522
1523 def ToDict(self, _with_private=False):
1524 """Custom function for serializing.
1525
1526 """
1527 data = super(Node, self).ToDict(_with_private=_with_private)
1528
1529 hv_state = data.get("hv_state", None)
1530 if hv_state is not None:
1531 data["hv_state"] = outils.ContainerToDicts(hv_state)
1532
1533 disk_state = data.get("disk_state", None)
1534 if disk_state is not None:
1535 data["disk_state"] = \
1536 dict((key, outils.ContainerToDicts(value))
1537 for (key, value) in disk_state.items())
1538
1539 return data
1540
1541 @classmethod
1542 def FromDict(cls, val):
1543 """Custom function for deserializing.
1544
1545 """
1546 obj = super(Node, cls).FromDict(val)
1547
1548 if obj.hv_state is not None:
1549 obj.hv_state = \
1550 outils.ContainerFromDicts(obj.hv_state, dict, NodeHvState)
1551
1552 if obj.disk_state is not None:
1553 obj.disk_state = \
1554 dict((key, outils.ContainerFromDicts(value, dict, NodeDiskState))
1555 for (key, value) in obj.disk_state.items())
1556
1557 return obj
1558
1559
1560 class NodeGroup(TaggableObject):
1561 """Config object representing a node group."""
1562 __slots__ = [
1563 "name",
1564 "members",
1565 "ndparams",
1566 "diskparams",
1567 "ipolicy",
1568 "serial_no",
1569 "hv_state_static",
1570 "disk_state_static",
1571 "alloc_policy",
1572 "networks",
1573 ] + _TIMESTAMPS + _UUID
1574
1575 def ToDict(self, _with_private=False):
1576 """Custom function for nodegroup.
1577
1578 This discards the members object, which gets recalculated and is only kept
1579 in memory.
1580
1581 """
1582 mydict = super(NodeGroup, self).ToDict(_with_private=_with_private)
1583 del mydict["members"]
1584 return mydict
1585
1586 @classmethod
1587 def FromDict(cls, val):
1588 """Custom function for nodegroup.
1589
1590 The members slot is initialized to an empty list, upon deserialization.
1591
1592 """
1593 obj = super(NodeGroup, cls).FromDict(val)
1594 obj.members = []
1595 return obj
1596
1597 def UpgradeConfig(self):
1598 """Fill defaults for missing configuration values.
1599
1600 """
1601 if self.ndparams is None:
1602 self.ndparams = {}
1603
1604 if self.serial_no is None:
1605 self.serial_no = 1
1606
1607 if self.alloc_policy is None:
1608 self.alloc_policy = constants.ALLOC_POLICY_PREFERRED
1609
1610 # We only update mtime, and not ctime, since we would not be able
1611 # to provide a correct value for creation time.
1612 if self.mtime is None:
1613 self.mtime = time.time()
1614
1615 if self.diskparams is None:
1616 self.diskparams = {}
1617 if self.ipolicy is None:
1618 self.ipolicy = MakeEmptyIPolicy()
1619
1620 if self.hv_state_static is None:
1621 self.hv_state_static = {}
1622 if self.disk_state_static is None:
1623 self.disk_state_static = {}
1624
1625 if self.networks is None:
1626 self.networks = {}
1627
1628 for network, netparams in self.networks.items():
1629 self.networks[network] = FillDict(constants.NICC_DEFAULTS, netparams)
1630
1631 def FillND(self, node):
1632 """Return filled out ndparams for L{objects.Node}
1633
1634 @type node: L{objects.Node}
1635 @param node: A Node object to fill
1636 @return: a copy of the node's ndparams with defaults filled
1637
1638 """
1639 return self.SimpleFillND(node.ndparams)
1640
1641 def SimpleFillND(self, ndparams):
1642 """Fill a given ndparams dict with defaults.
1643
1644 @type ndparams: dict
1645 @param ndparams: the dict to fill
1646 @rtype: dict
1647 @return: a copy of the passed in ndparams with missing keys filled
1648 from the node group defaults
1649
1650 """
1651 return FillDict(self.ndparams, ndparams)
1652
1653
1654 class Cluster(TaggableObject):
1655 """Config object representing the cluster."""
1656 __slots__ = [
1657 "serial_no",
1658 "rsahostkeypub",
1659 "dsahostkeypub",
1660 "highest_used_port",
1661 "tcpudp_port_pool",
1662 "mac_prefix",
1663 "volume_group_name",
1664 "reserved_lvs",
1665 "drbd_usermode_helper",
1666 "default_bridge",
1667 "default_hypervisor",
1668 "master_node",
1669 "master_ip",
1670 "master_netdev",
1671 "master_netmask",
1672 "use_external_mip_script",
1673 "cluster_name",
1674 "file_storage_dir",
1675 "shared_file_storage_dir",
1676 "gluster_storage_dir",
1677 "enabled_hypervisors",
1678 "hvparams",
1679 "ipolicy",
1680 "os_hvp",
1681 "beparams",
1682 "osparams",
1683 "osparams_private_cluster",
1684 "nicparams",
1685 "ndparams",
1686 "diskparams",
1687 "candidate_pool_size",
1688 "modify_etc_hosts",
1689 "modify_ssh_setup",
1690 "maintain_node_health",
1691 "uid_pool",
1692 "default_iallocator",
1693 "default_iallocator_params",
1694 "hidden_os",
1695 "blacklisted_os",
1696 "primary_ip_family",
1697 "prealloc_wipe_disks",
1698 "hv_state_static",
1699 "disk_state_static",
1700 "enabled_disk_templates",
1701 "candidate_certs",
1702 "max_running_jobs",
1703 "max_tracked_jobs",
1704 "install_image",
1705 "instance_communication_network",
1706 "zeroing_image",
1707 "compression_tools",
1708 "enabled_user_shutdown",
1709 "data_collectors",
1710 "diagnose_data_collector_filename",
1711 "ssh_key_type",
1712 "ssh_key_bits",
1713 ] + _TIMESTAMPS + _UUID
1714
1715 def UpgradeConfig(self):
1716 """Fill defaults for missing configuration values.
1717
1718 """
1719 # pylint: disable=E0203
1720 # because these are "defined" via slots, not manually
1721 if self.hvparams is None:
1722 self.hvparams = constants.HVC_DEFAULTS
1723 else:
1724 for hypervisor in constants.HYPER_TYPES:
1725 try:
1726 existing_params = self.hvparams[hypervisor]
1727 except KeyError:
1728 existing_params = {}
1729 self.hvparams[hypervisor] = FillDict(
1730 constants.HVC_DEFAULTS[hypervisor], existing_params)
1731
1732 if self.os_hvp is None:
1733 self.os_hvp = {}
1734
1735 if self.osparams is None:
1736 self.osparams = {}
1737 # osparams_private_cluster added in 2.12
1738 if self.osparams_private_cluster is None:
1739 self.osparams_private_cluster = {}
1740
1741 self.ndparams = UpgradeNDParams(self.ndparams)
1742
1743 self.beparams = UpgradeGroupedParams(self.beparams,
1744 constants.BEC_DEFAULTS)
1745 for beparams_group in self.beparams:
1746 UpgradeBeParams(self.beparams[beparams_group])
1747
1748 migrate_default_bridge = not self.nicparams
1749 self.nicparams = UpgradeGroupedParams(self.nicparams,
1750 constants.NICC_DEFAULTS)
1751 if migrate_default_bridge:
1752 self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
1753 self.default_bridge
1754
1755 if self.modify_etc_hosts is None:
1756 self.modify_etc_hosts = True
1757
1758 if self.modify_ssh_setup is None:
1759 self.modify_ssh_setup = True
1760
1761 # default_bridge is no longer used in 2.1. The slot is left there to
1762 # support auto-upgrading. It can be removed once we decide to deprecate
1763 # upgrading straight from 2.0.
1764 if self.default_bridge is not None:
1765 self.default_bridge = None
1766
1767 # default_hypervisor is just the first enabled one in 2.1. This slot and
1768 # code can be removed once upgrading straight from 2.0 is deprecated.
1769 if self.default_hypervisor is not None:
1770 self.enabled_hypervisors = ([self.default_hypervisor] +
1771 [hvname for hvname in self.enabled_hypervisors
1772 if hvname != self.default_hypervisor])
1773 self.default_hypervisor = None
1774
1775 # maintain_node_health added after 2.1.1
1776 if self.maintain_node_health is None:
1777 self.maintain_node_health = False
1778
1779 if self.uid_pool is None:
1780 self.uid_pool = []
1781
1782 if self.default_iallocator is None:
1783 self.default_iallocator = ""
1784
1785 if self.default_iallocator_params is None:
1786 self.default_iallocator_params = {}
1787
1788 # reserved_lvs added before 2.2
1789 if self.reserved_lvs is None:
1790 self.reserved_lvs = []
1791
1792 # hidden and blacklisted operating systems added before 2.2.1
1793 if self.hidden_os is None:
1794 self.hidden_os = []
1795
1796 if self.blacklisted_os is None:
1797 self.blacklisted_os = []
1798
1799 # primary_ip_family added before 2.3
1800 if self.primary_ip_family is None:
1801 self.primary_ip_family = AF_INET
1802
1803 if self.master_netmask is None:
1804 ipcls = netutils.IPAddress.GetClassFromIpFamily(self.primary_ip_family)
1805 self.master_netmask = ipcls.iplen
1806
1807 if self.prealloc_wipe_disks is None:
1808 self.prealloc_wipe_disks = False
1809
1810 # shared_file_storage_dir added before 2.5
1811 if self.shared_file_storage_dir is None:
1812 self.shared_file_storage_dir = ""
1813
1814 # gluster_storage_dir added in 2.11
1815 if self.gluster_storage_dir is None:
1816 self.gluster_storage_dir = ""
1817
1818 if self.use_external_mip_script is None:
1819 self.use_external_mip_script = False
1820
1821 if self.diskparams:
1822 self.diskparams = UpgradeDiskParams(self.diskparams)
1823 else:
1824 self.diskparams = constants.DISK_DT_DEFAULTS.copy()
1825
1826 # instance policy added before 2.6
1827 if self.ipolicy is None:
1828 self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, {})
1829 else:
1830 # We can either always upgrade the ipolicy, or only do it in some
1831 # corner cases (e.g. missing keys); note that always upgrading
1832 # breaks any removal of keys from the ipolicy dict
1833 wrongkeys = frozenset(self.ipolicy.keys()) - constants.IPOLICY_ALL_KEYS
1834 if wrongkeys:
1835 # These keys would be silently removed by FillIPolicy()
1836 msg = ("Cluster instance policy contains spurious keys: %s" %
1837 utils.CommaJoin(wrongkeys))
1838 raise errors.ConfigurationError(msg)
1839 self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, self.ipolicy)
1840
1841 # hv_state_static added in 2.7
1842 if self.hv_state_static is None:
1843 self.hv_state_static = {}
1844 if self.disk_state_static is None:
1845 self.disk_state_static = {}
1846
1847 if self.candidate_certs is None:
1848 self.candidate_certs = {}
1849
1850 if self.max_running_jobs is None:
1851 self.max_running_jobs = constants.LUXID_MAXIMAL_RUNNING_JOBS_DEFAULT
1852
1853 if self.max_tracked_jobs is None:
1854 self.max_tracked_jobs = constants.LUXID_MAXIMAL_TRACKED_JOBS_DEFAULT
1855
1856 if self.instance_communication_network is None:
1857 self.instance_communication_network = ""
1858
1859 if self.install_image is None:
1860 self.install_image = ""
1861
1862 if self.compression_tools is None:
1863 self.compression_tools = constants.IEC_DEFAULT_TOOLS
1864
1865 if self.enabled_user_shutdown is None:
1866 self.enabled_user_shutdown = False
1867
1868 if self.ssh_key_type is None:
1869 self.ssh_key_type = constants.SSH_DEFAULT_KEY_TYPE
1870
1871 if self.ssh_key_bits is None:
1872 self.ssh_key_bits = constants.SSH_DEFAULT_KEY_BITS
1873
1874 @property
1875 def primary_hypervisor(self):
1876 """The first hypervisor is the primary.
1877
1878 Useful, for example, for L{Node}'s hv/disk state.
1879
1880 """
1881 return self.enabled_hypervisors[0]
1882
1883 def ToDict(self, _with_private=False):
1884 """Custom function for cluster.
1885
1886 """
1887 mydict = super(Cluster, self).ToDict(_with_private=_with_private)
1888
1889 # Explicitly save private parameters.
1890 if _with_private:
1891 for os in mydict["osparams_private_cluster"]:
1892 mydict["osparams_private_cluster"][os] = \
1893 self.osparams_private_cluster[os].Unprivate()
1894
1895 if self.tcpudp_port_pool is None:
1896 tcpudp_port_pool = []
1897 else:
1898 tcpudp_port_pool = list(self.tcpudp_port_pool)
1899
1900 mydict["tcpudp_port_pool"] = tcpudp_port_pool
1901
1902 return mydict
1903
1904 @classmethod
1905 def FromDict(cls, val):
1906 """Custom function for cluster.
1907
1908 """
1909 obj = super(Cluster, cls).FromDict(val)
1910
1911 if obj.tcpudp_port_pool is None:
1912 obj.tcpudp_port_pool = set()
1913 elif not isinstance(obj.tcpudp_port_pool, set):
1914 obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)
1915
1916 return obj
1917
1918 def SimpleFillDP(self, diskparams):
1919 """Fill a given diskparams dict with cluster defaults.
1920
1921 @param diskparams: the disk parameters to fill
1922 @return: a copy of the given diskparams with defaults filled
1923
1924 """
1925 return FillDiskParams(self.diskparams, diskparams)
1926
1927 def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
1928 """Get the default hypervisor parameters for the cluster.
1929
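A layering sketch (assumes C{cluster} is an existing, upgraded L{Cluster}
instance; the OS name is illustrative)::

  # os_hvp overrides for the given OS take precedence over the plain
  # cluster-level hvparams for the hypervisor
  hv_defaults = cluster.GetHVDefaults(constants.HT_KVM,
                                      os_name="debian-image")
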
1930 @param hypervisor: the hypervisor name
1931 @param os_name: if specified, we'll also update the defaults for this OS
1932 @param skip_keys: if passed, list of keys not to use
1933 @return: the defaults dict
1934
1935 """
1936 if skip_keys is None:
1937 skip_keys = []
1938
1939 fill_stack = [self.hvparams.get(hypervisor, {})]
1940 if os_name is not None:
1941 os_hvp = self.os_hvp.get(os_name, {}).get(hypervisor, {})
1942 fill_stack.append(os_hvp)
1943
1944 ret_dict = {}
1945 for o_dict in fill_stack:
1946 ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys)
1947
1948 return ret_dict
1949
1950 def SimpleFillHV(self, hv_name, os_name, hvparams, skip_globals=False):
1951 """Fill a given hvparams dict with cluster defaults.
1952
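Usage sketch (assumes C{cluster} is an existing L{Cluster}; the hvparam
key and value shown are illustrative)::

  filled = cluster.SimpleFillHV(constants.HT_KVM, "debian-image",
                                {"kernel_path": "/boot/vmlinuz"})
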
1953 @type hv_name: string
1954 @param hv_name: the hypervisor to use
1955 @type os_name: string
1956 @param os_name: the OS to use for overriding the hypervisor defaults
1957 @type skip_globals: boolean
1958 @param skip_globals: if True, the global hypervisor parameters will
1959 not be filled
1960 @rtype: dict
1961 @return: a copy of the given hvparams with missing keys filled from
1962 the cluster defaults
1963
1964 """
1965 if skip_globals:
1966 skip_keys = constants.HVC_GLOBALS
1967 else:
1968 skip_keys = []
1969
1970 def_dict = self.GetHVDefaults(hv_name, os_name, skip_keys=skip_keys)
1971 return FillDict(def_dict, hvparams, skip_keys=skip_keys)
1972
1973 def FillHV(self, instance, skip_globals=False):
1974 """Fill an instance's hvparams dict with cluster defaults.
1975
1976 @type instance: L{objects.Instance}
1977 @param instance: the instance parameter to fill
1978 @type skip_globals: boolean
1979 @param skip_globals: if True, the global hypervisor parameters will
1980 not be filled
1981 @rtype: dict
1982 @return: a copy of the instance's hvparams with missing keys filled from
1983 the cluster defaults
1984
1985 """
1986 return self.SimpleFillHV(instance.hypervisor, instance.os,
1987 instance.hvparams, skip_globals)
1988
1989 def SimpleFillBE(self, beparams):
1990 """Fill a given beparams dict with cluster defaults.
1991
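Usage sketch (assumes C{cluster} is an existing L{Cluster}; the value is
illustrative)::

  full_be = cluster.SimpleFillBE({constants.BE_VCPUS: 2})
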
1992 @type beparams: dict
1993 @param beparams: the dict to fill
1994 @rtype: dict
1995 @return: a copy of the passed in beparams with missing keys filled
1996 from the cluster defaults
1997
1998 """
1999 return FillDict(self.beparams.get(constants.PP_DEFAULT, {}), beparams)
2000
2001 def FillBE(self, instance):
2002 """Fill an instance's beparams dict with cluster defaults.
2003
2004 @type instance: L{objects.Instance}
2005 @param instance: the instance parameter to fill
2006 @rtype: dict
2007 @return: a copy of the instance's beparams with missing keys filled from
2008 the cluster defaults
2009
2010 """
2011 return self.SimpleFillBE(instance.beparams)
2012
2013 def SimpleFillNIC(self, nicparams):
2014 """Fill a given nicparams dict with cluster defaults.
2015
2016 @type nicparams: dict
2017 @param nicparams: the dict to fill
2018 @rtype: dict
2019 @return: a copy of the passed in nicparams with missing keys filled
2020 from the cluster defaults
2021
2022 """
2023 return FillDict(self.nicparams.get(constants.PP_DEFAULT, {}), nicparams)
2024
2025 def SimpleFillOS(self, os_name,
2026 os_params_public,
2027 os_params_private=None,
2028 os_params_secret=None):
2029 """Fill an instance's osparams dict with cluster defaults.
2030
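Usage sketch (assumes C{cluster} is an existing L{Cluster}; the OS name
and parameter keys/values are illustrative)::

  params = cluster.SimpleFillOS("debian-image",
                                {"mirror": "http://deb.example.com"},
                                os_params_private={"signing_key": "s3cr3t"})
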
2031 @type os_name: string
2032 @param os_name: the OS name to use
2033 @type os_params_public: dict
2034 @param os_params_public: the dict to fill with default values
2035 @type os_params_private: dict
2036 @param os_params_private: the dict with private fields to fill
2037 with default values. Not passing this field
2038 results in no private fields being added to the
2039 return value. Private fields will be wrapped in
2040 L{Private} objects.
2041 @type os_params_secret: dict
2042 @param os_params_secret: the dict with secret fields to fill
2043 with default values. Not passing this field
2044 results in no secret fields being added to the
2045 return value. Secret fields will be wrapped in
2046 L{Private} objects.
2047 @rtype: dict
2048 @return: a copy of the instance's osparams with missing keys filled from
2049 the cluster defaults. Private and secret parameters are not included
2050 unless the respective optional parameters are supplied.
2051
2052 """
2053 if os_name is None:
2054 name_only = None
2055 else:
2056 name_only = OS.GetName(os_name)
2057
2058 defaults_base_public = self.osparams.get(name_only, {})
2059 defaults_public = FillDict(defaults_base_public,
2060 self.osparams.get(os_name, {}))
2061 params_public = FillDict(defaults_public, os_params_public)
2062
2063 if os_params_private is not None:
2064 defaults_base_private = self.osparams_private_cluster.get(name_only, {})
2065 defaults_private = FillDict(defaults_base_private,
2066 self.osparams_private_cluster.get(os_name,
2067 {}))
2068 params_private = FillDict(defaults_private, os_params_private)
2069 else:
2070 params_private = {}
2071
2072 if os_params_secret is not None:
2073 # There can't be default secret settings, so there's nothing to be done.
2074 params_secret = os_params_secret
2075 else:
2076 params_secret = {}
2077
2078 # Enforce that the key sets are disjoint:
2079 duplicate_keys = utils.GetRepeatedKeys(params_public,
2080 params_private,
2081 params_secret)
2082 if not duplicate_keys:
2083
2084 # Actually update them:
2085 params_public.update(params_private)
2086 params_public.update(params_secret)
2087
2088 return params_public
2089
2090 else:
2091
2092 def formatter(keys):
2093 return utils.CommaJoin(sorted(map(repr, keys))) if keys else "(none)"
2094
2095 # Lose the values; only the key sets matter for the error message.
2096 params_public = set(params_public)
2097 params_private = set(params_private)
2098 params_secret = set(params_secret)
2099
2100 msg = """Cannot assign multiple values to OS parameters.
2101
2102 Conflicting OS parameters that would have been set by this operation:
2103 - at public visibility: {public}
2104 - at private visibility: {private}
2105 - at secret visibility: {secret}
2106 """.format(public=formatter(params_public & duplicate_keys),
2107 private=formatter(params_private & duplicate_keys),
2108 secret=formatter(params_secret & duplicate_keys))
2109 raise errors.OpPrereqError(msg)
2110
2111 @staticmethod
2112 def SimpleFillHvState(hv_state):
2113 """Fill an hv_state sub dict with cluster defaults.
2114
2115 """
2116 return FillDict(constants.HVST_DEFAULTS, hv_state)
2117
2118 @staticmethod
2119 def SimpleFillDiskState(disk_state):
2120 """Fill a disk_state sub dict with cluster defaults.
2121
2122 """
2123 return FillDict(constants.DS_DEFAULTS, disk_state)
2124
2125 def FillND(self, node, nodegroup):
2126 """Return filled out ndparams for L{objects.NodeGroup} and L{objects.Node}
2127
2128 @type node: L{objects.Node}
2129 @param node: A Node object to fill
2130 @type nodegroup: L{objects.NodeGroup}
2131 @param nodegroup: The node group the node belongs to
2132 @return: a copy of the node's ndparams with defaults filled
2133
2134 """
2135 return self.SimpleFillND(nodegroup.FillND(node))
2136
2137 def FillNDGroup(self, nodegroup):
2138 """Return filled out ndparams for just L{objects.NodeGroup}
2139
2140 @type nodegroup: L{objects.NodeGroup}
2141 @param nodegroup: A NodeGroup object to fill
2142 @return: a copy of the node group's ndparams with defaults filled
2143
2144 """
2145 return self.SimpleFillND(nodegroup.SimpleFillND({}))
2146
2147 def SimpleFillND(self, ndparams):
2148 """Fill a given ndparams dict with defaults.
2149
2150 @type ndparams: dict
2151 @param ndparams: the dict to fill
2152 @rtype: dict
2153 @return: a copy of the passed in ndparams with missing keys filled
2154 from the cluster defaults
2155
2156 """
2157 return FillDict(self.ndparams, ndparams)
2158
2159 def SimpleFillIPolicy(self, ipolicy):
2160 """Fill an instance policy dict with defaults.
2161
2162 @type ipolicy: dict
2163 @param ipolicy: the dict to fill
2164 @rtype: dict
2165 @return: a copy of passed ipolicy with missing keys filled from
2166 the cluster defaults
2167
2168 """
2169 return FillIPolicy(self.ipolicy, ipolicy)
2170
2171 def IsDiskTemplateEnabled(self, disk_template):
2172 """Checks if a particular disk template is enabled.
2173
2174 """
2175 return utils.storage.IsDiskTemplateEnabled(
2176 disk_template, self.enabled_disk_templates)
2177
2178 def IsFileStorageEnabled(self):
2179 """Checks if file storage is enabled.
2180
2181 """
2182 return utils.storage.IsFileStorageEnabled(self.enabled_disk_templates)
2183
2184 def IsSharedFileStorageEnabled(self):
2185 """Checks if shared file storage is enabled.
2186
2187 """
2188 return utils.storage.IsSharedFileStorageEnabled(
2189 self.enabled_disk_templates)
2190
2191
2192 class BlockDevStatus(ConfigObject):
2193 """Config object representing the status of a block device."""
2194 __slots__ = [
2195 "dev_path",
2196 "major",
2197 "minor",
2198 "sync_percent",
2199 "estimated_time",
2200 "is_degraded",
2201 "ldisk_status",
2202 ]
2203
2204
2205 class ImportExportStatus(ConfigObject):
2206 """Config object representing the status of an import or export."""
2207 __slots__ = [
2208 "recent_output",
2209 "listen_port",
2210 "connected",
2211 "progress_mbytes",
2212 "progress_throughput",
2213 "progress_eta",
2214 "progress_percent",
2215 "exit_status",
2216 "error_message",
2217 ] + _TIMESTAMPS
2218
2219
2220 class ImportExportOptions(ConfigObject):
2221 """Options for import/export daemon
2222
2223 @ivar key_name: X509 key name (None for cluster certificate)
2224 @ivar ca_pem: Remote peer CA in PEM format (None for cluster certificate)
2225 @ivar compress: Compression tool to use
2226 @ivar magic: Used to ensure the connection goes to the right disk
2227 @ivar ipv6: Whether to use IPv6
2228 @ivar connect_timeout: Number of seconds for establishing connection
2229
2230 """
2231 __slots__ = [
2232 "key_name",
2233 "ca_pem",
2234 "compress",
2235 "magic",
2236 "ipv6",
2237 "connect_timeout",
2238 ]
2239
2240
2241 class ConfdRequest(ConfigObject):
2242 """Object holding a confd request.
2243
2244 @ivar protocol: confd protocol version
2245 @ivar type: confd query type
2246 @ivar query: query request
2247 @ivar rsalt: requested reply salt
2248
2249 """
2250 __slots__ = [
2251 "protocol",
2252 "type",
2253 "query",
2254 "rsalt",
2255 ]
2256
2257
2258 class ConfdReply(ConfigObject):
2259 """Object holding a confd reply.
2260
2261 @ivar protocol: confd protocol version
2262 @ivar status: reply status code (ok, error)
2263 @ivar answer: confd query reply
2264 @ivar serial: configuration serial number
2265
2266 """
2267 __slots__ = [
2268 "protocol",
2269 "status",
2270 "answer",
2271 "serial",
2272 ]
2273
2274
2275 class QueryFieldDefinition(ConfigObject):
2276 """Object holding a query field definition.
2277
2278 @ivar name: Field name
2279 @ivar title: Human-readable title
2280 @ivar kind: Field type
2281 @ivar doc: Human-readable description
2282
2283 """
2284 __slots__ = [
2285 "name",
2286 "title",
2287 "kind",
2288 "doc",
2289 ]
2290
2291
2292 class _QueryResponseBase(ConfigObject):
2293 __slots__ = [
2294 "fields",
2295 ]
2296
2297 def ToDict(self, _with_private=False):
2298 """Custom function for serializing.
2299
2300 """
2301 mydict = super(_QueryResponseBase, self).ToDict()
2302 mydict["fields"] = outils.ContainerToDicts(mydict["fields"])
2303 return mydict
2304
2305 @classmethod
2306 def FromDict(cls, val):
2307 """Custom function for de-serializing.
2308
2309 """
2310 obj = super(_QueryResponseBase, cls).FromDict(val)
2311 obj.fields = \
2312 outils.ContainerFromDicts(obj.fields, list, QueryFieldDefinition)
2313 return obj
2314
2315
2316 class QueryResponse(_QueryResponseBase):
2317 """Object holding the response to a query.
2318
2319 @ivar fields: List of L{QueryFieldDefinition} objects
2320 @ivar data: Requested data
2321
2322 """
2323 __slots__ = [
2324 "data",
2325 ]
2326
2327
2328 class QueryFieldsRequest(ConfigObject):
2329 """Object holding a request for querying available fields.
2330
2331 """
2332 __slots__ = [
2333 "what",
2334 "fields",
2335 ]
2336
2337
2338 class QueryFieldsResponse(_QueryResponseBase):
2339 """Object holding the response to a query for fields.
2340
2341 @ivar fields: List of L{QueryFieldDefinition} objects
2342
2343 """
2344 __slots__ = []
2345
2346
2347 class MigrationStatus(ConfigObject):
2348 """Object holding the status of a migration.
2349
2350 """
2351 __slots__ = [
2352 "status",
2353 "transferred_ram",
2354 "total_ram",
2355 ]
2356
2357
2358 class InstanceConsole(ConfigObject):
2359 """Object describing how to access the console of an instance.
2360
2361 """
2362 __slots__ = [
2363 "instance",
2364 "kind",
2365 "message",
2366 "host",
2367 "port",
2368 "user",
2369 "command",
2370 "display",
2371 ]
2372
2373 def Validate(self):
2374 """Validates contents of this object.
2375
2376 """
2377 assert self.kind in constants.CONS_ALL, "Unknown console type"
2378 assert self.instance, "Missing instance name"
2379 assert self.message or self.kind in [constants.CONS_SSH,
2380 constants.CONS_SPICE,
2381 constants.CONS_VNC]
2382 assert self.host or self.kind == constants.CONS_MESSAGE
2383 assert self.port or self.kind in [constants.CONS_MESSAGE,
2384 constants.CONS_SSH]
2385 assert self.user or self.kind in [constants.CONS_MESSAGE,
2386 constants.CONS_SPICE,
2387 constants.CONS_VNC]
2388 assert self.command or self.kind in [constants.CONS_MESSAGE,
2389 constants.CONS_SPICE,
2390 constants.CONS_VNC]
2391 assert self.display or self.kind in [constants.CONS_MESSAGE,
2392 constants.CONS_SPICE,
2393 constants.CONS_SSH]
2394
2395
2396 class Network(TaggableObject):
2397 """Object representing a network definition for ganeti.
2398
2399 """
2400 __slots__ = [
2401 "name",
2402 "serial_no",
2403 "mac_prefix",
2404 "network",
2405 "network6",
2406 "gateway",
2407 "gateway6",
2408 "reservations",
2409 "ext_reservations",
2410 ] + _TIMESTAMPS + _UUID
2411
2412 def HooksDict(self, prefix=""):
2413 """Export a dictionary used by hooks with a network's information.
2414
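For example, with C{prefix="GANETI_"} (illustrative) the result contains
keys such as C{GANETI_NETWORK_NAME}, C{GANETI_NETWORK_UUID} and, when the
corresponding attributes are set, C{GANETI_NETWORK_SUBNET} and
C{GANETI_NETWORK_GATEWAY}.
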
2415 @type prefix: string
2416 @param prefix: Prefix to prepend to the dict entries
2417
2418 """
2419 result = {
2420 "%sNETWORK_NAME" % prefix: self.name,
2421 "%sNETWORK_UUID" % prefix: self.uuid,
2422 "%sNETWORK_TAGS" % prefix: " ".join(self.GetTags()),
2423 }
2424 if self.network:
2425 result["%sNETWORK_SUBNET" % prefix] = self.network
2426 if self.gateway:
2427 result["%sNETWORK_GATEWAY" % prefix] = self.gateway
2428 if self.network6:
2429 result["%sNETWORK_SUBNET6" % prefix] = self.network6
2430 if self.gateway6:
2431 result["%sNETWORK_GATEWAY6" % prefix] = self.gateway6
2432 if self.mac_prefix:
2433 result["%sNETWORK_MAC_PREFIX" % prefix] = self.mac_prefix
2434
2435 return result
2436
2437 @classmethod
2438 def FromDict(cls, val):
2439 """Custom function for networks.
2440
2441 Remove deprecated network_type and family.
2442
2443 """
2444 if "network_type" in val:
2445 del val["network_type"]
2446 if "family" in val:
2447 del val["family"]
2448 obj = super(Network, cls).FromDict(val)
2449 return obj
2450
2451
2452 # need to inherit object in order to use super()
2453 class SerializableConfigParser(ConfigParser.SafeConfigParser, object):
2454 """Simple wrapper over ConfigParser that allows serialization.
2455
2456 This class is basically ConfigParser.SafeConfigParser with two
2457 additional methods that allow it to serialize/unserialize to/from a
2458 buffer.
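
A round-trip sketch (section and option names are illustrative)::

  cfp = SerializableConfigParser()
  cfp.add_section("example")
  cfp.set("example", "key", "value")
  text = cfp.Dumps()
  clone = SerializableConfigParser.Loads(text)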
2459
2460 """
2461 def Dumps(self):
2462 """Dump this instance and return the string representation."""
2463 buf = StringIO()
2464 self.write(buf)
2465 return buf.getvalue()
2466
2467 @classmethod
2468 def Loads(cls, data):
2469 """Load data from a string."""
2470 buf = StringIO(data)
2471 cfp = cls()
2472 cfp.readfp(buf)
2473 return cfp
2474
2475 def get(self, section, option, **kwargs):
2476 value = None
2477 try:
2478 value = super(SerializableConfigParser, self).get(section, option,
2479 **kwargs)
2480 if value.lower() == constants.VALUE_NONE:
2481 value = None
2482 except ConfigParser.NoOptionError:
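# Per-disk/NIC options matched below (name, network, vlan) are
# optional; for those a missing key yields None instead of an error.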
2483 r = re.compile(r"(disk|nic)\d+_name|nic\d+_(network|vlan)")
2484 match = r.match(option)
2485 if match:
2486 pass
2487 else:
2488 raise
2489
2490 return value
2491
2492
2493 class LvmPvInfo(ConfigObject):
2494 """Information about an LVM physical volume (PV).
2495
2496 @type name: string
2497 @ivar name: name of the PV
2498 @type vg_name: string
2499 @ivar vg_name: name of the volume group containing the PV
2500 @type size: float
2501 @ivar size: size of the PV in MiB
2502 @type free: float
2503 @ivar free: free space in the PV, in MiB
2504 @type attributes: string
2505 @ivar attributes: PV attributes
2506 @type lv_list: list of strings
2507 @ivar lv_list: names of the LVs hosted on the PV
2508 """
2509 __slots__ = [
2510 "name",
2511 "vg_name",
2512 "size",
2513 "free",
2514 "attributes",
2515 "lv_list"
2516 ]
2517
2518 def IsEmpty(self):
2519 """Is this PV empty?
2520
2521 """
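# The PV is considered empty if at most 1 MiB appears to be in use.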
2522 return self.size <= (self.free + 1)
2523
2524 def IsAllocatable(self):
2525 """Is this PV allocatable?
2526
2527 """
2528 return ("a" in self.attributes)