Add tags in network objects
lib/objects.py
1 #
2 #
3
4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc.
5 #
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
10 #
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
15 #
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 # 02110-1301, USA.
20
21
22 """Transportable objects for Ganeti.
23
24 This module provides small, mostly data-only objects which are safe to
25 pass to and from external parties.
26
27 """
28
29 # pylint: disable=E0203,W0201,R0902
30
31 # E0203: Access to member %r before its definition, since we use
32 # objects.py which doesn't explicitly initialise its members
33
34 # W0201: Attribute '%s' defined outside __init__
35
36 # R0902: Allow instances of these objects to have more than 20 attributes
37
38 import ConfigParser
39 import re
40 import copy
41 import time
42 from cStringIO import StringIO
43
44 from ganeti import errors
45 from ganeti import constants
46 from ganeti import netutils
47 from ganeti import objectutils
48 from ganeti import utils
49
50 from socket import AF_INET
51
52
53 __all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
54 "OS", "Node", "NodeGroup", "Cluster", "FillDict", "Network"]
55
56 _TIMESTAMPS = ["ctime", "mtime"]
57 _UUID = ["uuid"]
58
59
60 def FillDict(defaults_dict, custom_dict, skip_keys=None):
61 """Basic function to apply settings on top a default dict.
62
63 @type defaults_dict: dict
64 @param defaults_dict: dictionary holding the default values
65 @type custom_dict: dict
66 @param custom_dict: dictionary holding the customized values
67 @type skip_keys: list
68 @param skip_keys: which keys not to fill
69 @rtype: dict
70 @return: dict with the 'full' values
71
72 """
73 ret_dict = copy.deepcopy(defaults_dict)
74 ret_dict.update(custom_dict)
75 if skip_keys:
76 for k in skip_keys:
77 try:
78 del ret_dict[k]
79 except KeyError:
80 pass
81 return ret_dict
82
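# A minimal illustration of FillDict (the values are made up for the example):
#   >>> FillDict({"a": 1, "b": 2}, {"b": 3}, skip_keys=["a"])
#   {'b': 3}
# Custom values override the defaults and skipped keys are dropped from the
# result; the defaults dict itself is never modified (it is deep-copied).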
83
84 def FillIPolicy(default_ipolicy, custom_ipolicy, skip_keys=None):
85 """Fills an instance policy with defaults.
86
87 """
88 assert frozenset(default_ipolicy.keys()) == constants.IPOLICY_ALL_KEYS
89 ret_dict = {}
90 for key in constants.IPOLICY_ISPECS:
91 ret_dict[key] = FillDict(default_ipolicy[key],
92 custom_ipolicy.get(key, {}),
93 skip_keys=skip_keys)
94 # list items
95 for key in [constants.IPOLICY_DTS]:
96 ret_dict[key] = list(custom_ipolicy.get(key, default_ipolicy[key]))
97 # other items which we know we can directly copy (immutables)
98 for key in constants.IPOLICY_PARAMETERS:
99 ret_dict[key] = custom_ipolicy.get(key, default_ipolicy[key])
100
101 return ret_dict
102
103
104 def FillDiskParams(default_dparams, custom_dparams, skip_keys=None):
105 """Fills the disk parameter defaults.
106
107 @see: L{FillDict} for parameters and return value
108
109 """
110 assert frozenset(default_dparams.keys()) == constants.DISK_TEMPLATES
111
112 return dict((dt, FillDict(default_dparams[dt], custom_dparams.get(dt, {}),
113 skip_keys=skip_keys))
114 for dt in constants.DISK_TEMPLATES)
115
116
117 def UpgradeGroupedParams(target, defaults):
118 """Update all groups for the target parameter.
119
120 @type target: dict of dicts
121 @param target: {group: {parameter: value}}
122 @type defaults: dict
123 @param defaults: default parameter values
124
125 """
126 if target is None:
127 target = {constants.PP_DEFAULT: defaults}
128 else:
129 for group in target:
130 target[group] = FillDict(defaults, target[group])
131 return target
132
133
134 def UpgradeBeParams(target):
135 """Update the be parameters dict to the new format.
136
137 @type target: dict
138 @param target: "be" parameters dict
139
140 """
141 if constants.BE_MEMORY in target:
142 memory = target[constants.BE_MEMORY]
143 target[constants.BE_MAXMEM] = memory
144 target[constants.BE_MINMEM] = memory
145 del target[constants.BE_MEMORY]
146
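# Illustrative upgrade (done in place): a legacy dict such as
# {constants.BE_MEMORY: 128} becomes
# {constants.BE_MAXMEM: 128, constants.BE_MINMEM: 128}, with the legacy
# memory key removed from the dict.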
147
148 def UpgradeDiskParams(diskparams):
149 """Upgrade the disk parameters.
150
151 @type diskparams: dict
152 @param diskparams: disk parameters to upgrade
153 @rtype: dict
154 @return: the upgraded disk parameters dict
155
156 """
157 if not diskparams:
158 result = {}
159 else:
160 result = FillDiskParams(constants.DISK_DT_DEFAULTS, diskparams)
161
162 return result
163
164
165 def UpgradeNDParams(ndparams):
166 """Upgrade ndparams structure.
167
168 @type ndparams: dict
169 @param ndparams: node parameters to upgrade
170 @rtype: dict
171 @return: the upgraded node parameters dict
172
173 """
174 if ndparams is None:
175 ndparams = {}
176
177 if (constants.ND_OOB_PROGRAM in ndparams and
178 ndparams[constants.ND_OOB_PROGRAM] is None):
179 # will be reset by the line below
180 del ndparams[constants.ND_OOB_PROGRAM]
181 return FillDict(constants.NDC_DEFAULTS, ndparams)
182
183
184 def MakeEmptyIPolicy():
185 """Create empty IPolicy dictionary.
186
187 """
188 return dict([
189 (constants.ISPECS_MIN, {}),
190 (constants.ISPECS_MAX, {}),
191 (constants.ISPECS_STD, {}),
192 ])
193
194
195 class ConfigObject(objectutils.ValidatedSlots):
196 """A generic config object.
197
198 It has the following properties:
199
200 - provides somewhat safe recursive unpickling and pickling for its classes
201 - unset attributes which are defined in slots are always returned
202 as None instead of raising an error
203
204 Classes derived from this must always declare __slots__ (we use many
205 config objects and the memory reduction is useful)
206
207 """
208 __slots__ = []
209
210 def __getattr__(self, name):
211 if name not in self.GetAllSlots():
212 raise AttributeError("Invalid object attribute %s.%s" %
213 (type(self).__name__, name))
214 return None
215
216 def __setstate__(self, state):
217 slots = self.GetAllSlots()
218 for name in state:
219 if name in slots:
220 setattr(self, name, state[name])
221
222 def Validate(self):
223 """Validates the slots.
224
225 """
226
227 def ToDict(self):
228 """Convert to a dict holding only standard python types.
229
230 The generic routine just dumps all of this object's attributes in
231 a dict. It does not work if the class has children that are
232 ConfigObjects themselves (e.g. the nics list in an Instance), in
233 which case the subclass should override this function in order to
234 make sure all objects returned are only standard python types.
235
236 """
237 result = {}
238 for name in self.GetAllSlots():
239 value = getattr(self, name, None)
240 if value is not None:
241 result[name] = value
242 return result
243
244 __getstate__ = ToDict
245
246 @classmethod
247 def FromDict(cls, val):
248 """Create an object from a dictionary.
249
250 This generic routine takes a dict, instantiates a new instance of
251 the given class, and sets attributes based on the dict content.
252
253 As for `ToDict`, this does not work if the class has children
254 that are ConfigObjects themselves (e.g. the nics list in an
255 Instance), in which case the subclass should override this function
256 and alter the objects accordingly.
257
258 """
259 if not isinstance(val, dict):
260 raise errors.ConfigurationError("Invalid object passed to FromDict:"
261 " expected dict, got %s" % type(val))
262 val_str = dict([(str(k), v) for k, v in val.iteritems()])
263 obj = cls(**val_str) # pylint: disable=W0142
264 return obj
265
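# Illustrative round trip for a ConfigObject subclass (the NIC values below
# are examples only): ToDict() yields a plain dict of the non-None slots and
# FromDict() rebuilds an equivalent object from it.
#   >>> nic = NIC(mac="aa:00:00:35:6e:9b", ip="192.0.2.10")
#   >>> NIC.FromDict(nic.ToDict()).mac
#   'aa:00:00:35:6e:9b'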
266 @staticmethod
267 def _ContainerToDicts(container):
268 """Convert the elements of a container to standard python types.
269
270 This method converts a container with elements derived from
271 ConfigData to standard python types. If the container is a dict,
272 we don't touch the keys, only the values.
273
274 """
275 if isinstance(container, dict):
276 ret = dict([(k, v.ToDict()) for k, v in container.iteritems()])
277 elif isinstance(container, (list, tuple, set, frozenset)):
278 ret = [elem.ToDict() for elem in container]
279 else:
280 raise TypeError("Invalid type %s passed to _ContainerToDicts" %
281 type(container))
282 return ret
283
284 @staticmethod
285 def _ContainerFromDicts(source, c_type, e_type):
286 """Convert a container from standard python types.
287
288 This method converts a container with standard python types to
289 ConfigData objects. If the container is a dict, we don't touch the
290 keys, only the values.
291
292 """
293 if not isinstance(c_type, type):
294 raise TypeError("Container type %s passed to _ContainerFromDicts is"
295 " not a type" % type(c_type))
296 if source is None:
297 source = c_type()
298 if c_type is dict:
299 ret = dict([(k, e_type.FromDict(v)) for k, v in source.iteritems()])
300 elif c_type in (list, tuple, set, frozenset):
301 ret = c_type([e_type.FromDict(elem) for elem in source])
302 else:
303 raise TypeError("Invalid container type %s passed to"
304 " _ContainerFromDicts" % c_type)
305 return ret
306
307 def Copy(self):
308 """Makes a deep copy of the current object and its children.
309
310 """
311 dict_form = self.ToDict()
312 clone_obj = self.__class__.FromDict(dict_form)
313 return clone_obj
314
315 def __repr__(self):
316 """Implement __repr__ for ConfigObjects."""
317 return repr(self.ToDict())
318
319 def UpgradeConfig(self):
320 """Fill defaults for missing configuration values.
321
322 This method will be called at configuration load time, and its
323 implementation will be object dependent.
324
325 """
326 pass
327
328
329 class TaggableObject(ConfigObject):
330 """An generic class supporting tags.
331
332 """
333 __slots__ = ["tags"]
334 VALID_TAG_RE = re.compile("^[\w.+*/:@-]+$")
335
336 @classmethod
337 def ValidateTag(cls, tag):
338 """Check if a tag is valid.
339
340 If the tag is invalid, an errors.TagError will be raised. The
341 function has no return value.
342
343 """
344 if not isinstance(tag, basestring):
345 raise errors.TagError("Invalid tag type (not a string)")
346 if len(tag) > constants.MAX_TAG_LEN:
347 raise errors.TagError("Tag too long (>%d characters)" %
348 constants.MAX_TAG_LEN)
349 if not tag:
350 raise errors.TagError("Tags cannot be empty")
351 if not cls.VALID_TAG_RE.match(tag):
352 raise errors.TagError("Tag contains invalid characters")
353
354 def GetTags(self):
355 """Return the tags list.
356
357 """
358 tags = getattr(self, "tags", None)
359 if tags is None:
360 tags = self.tags = set()
361 return tags
362
363 def AddTag(self, tag):
364 """Add a new tag.
365
366 """
367 self.ValidateTag(tag)
368 tags = self.GetTags()
369 if len(tags) >= constants.MAX_TAGS_PER_OBJ:
370 raise errors.TagError("Too many tags")
371 self.GetTags().add(tag)
372
373 def RemoveTag(self, tag):
374 """Remove a tag.
375
376 """
377 self.ValidateTag(tag)
378 tags = self.GetTags()
379 try:
380 tags.remove(tag)
381 except KeyError:
382 raise errors.TagError("Tag not found")
383
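# Illustrative tag handling on any TaggableObject subclass (the Node and
# tag values below are examples only):
#   >>> node = Node(name="node1.example.com")
#   >>> node.AddTag("rack:a3")
#   >>> node.GetTags()
#   set(['rack:a3'])
#   >>> node.RemoveTag("rack:a3")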
384 def ToDict(self):
385 """Taggable-object-specific conversion to standard python types.
386
387 This replaces the tags set with a list.
388
389 """
390 bo = super(TaggableObject, self).ToDict()
391
392 tags = bo.get("tags", None)
393 if isinstance(tags, set):
394 bo["tags"] = list(tags)
395 return bo
396
397 @classmethod
398 def FromDict(cls, val):
399 """Custom function for instances.
400
401 """
402 obj = super(TaggableObject, cls).FromDict(val)
403 if hasattr(obj, "tags") and isinstance(obj.tags, list):
404 obj.tags = set(obj.tags)
405 return obj
406
407
408 class MasterNetworkParameters(ConfigObject):
409 """Network configuration parameters for the master
410
411 @ivar name: master name
412 @ivar ip: master IP
413 @ivar netmask: master netmask
414 @ivar netdev: master network device
415 @ivar ip_family: master IP family
416
417 """
418 __slots__ = [
419 "name",
420 "ip",
421 "netmask",
422 "netdev",
423 "ip_family"
424 ]
425
426
427 class ConfigData(ConfigObject):
428 """Top-level config object."""
429 __slots__ = [
430 "version",
431 "cluster",
432 "nodes",
433 "nodegroups",
434 "instances",
435 "networks",
436 "serial_no",
437 ] + _TIMESTAMPS
438
439 def ToDict(self):
440 """Custom function for top-level config data.
441
442 This just replaces the lists of instances, nodes, node groups and
443 networks, plus the cluster object, with standard python types.
444
445 """
446 mydict = super(ConfigData, self).ToDict()
447 mydict["cluster"] = mydict["cluster"].ToDict()
448 for key in "nodes", "instances", "nodegroups", "networks":
449 mydict[key] = self._ContainerToDicts(mydict[key])
450
451 return mydict
452
453 @classmethod
454 def FromDict(cls, val):
455 """Custom function for top-level config data
456
457 """
458 obj = super(ConfigData, cls).FromDict(val)
459 obj.cluster = Cluster.FromDict(obj.cluster)
460 obj.nodes = cls._ContainerFromDicts(obj.nodes, dict, Node)
461 obj.instances = cls._ContainerFromDicts(obj.instances, dict, Instance)
462 obj.nodegroups = cls._ContainerFromDicts(obj.nodegroups, dict, NodeGroup)
463 obj.networks = cls._ContainerFromDicts(obj.networks, dict, Network)
464 return obj
465
466 def HasAnyDiskOfType(self, dev_type):
467 """Check if in there is at disk of the given type in the configuration.
468
469 @type dev_type: L{constants.LDS_BLOCK}
470 @param dev_type: the type to look for
471 @rtype: boolean
472 @return: boolean indicating if a disk of the given type was found or not
473
474 """
475 for instance in self.instances.values():
476 for disk in instance.disks:
477 if disk.IsBasedOnDiskType(dev_type):
478 return True
479 return False
480
481 def UpgradeConfig(self):
482 """Fill defaults for missing configuration values.
483
484 """
485 self.cluster.UpgradeConfig()
486 for node in self.nodes.values():
487 node.UpgradeConfig()
488 for instance in self.instances.values():
489 instance.UpgradeConfig()
490 if self.nodegroups is None:
491 self.nodegroups = {}
492 for nodegroup in self.nodegroups.values():
493 nodegroup.UpgradeConfig()
494 if self.cluster.drbd_usermode_helper is None:
495 # To decide if we set a helper, let's check if at least one instance has
496 # a DRBD disk. This does not cover all the possible scenarios but it
497 # gives a good approximation.
498 if self.HasAnyDiskOfType(constants.LD_DRBD8):
499 self.cluster.drbd_usermode_helper = constants.DEFAULT_DRBD_HELPER
500 if self.networks is None:
501 self.networks = {}
502
503
504 class NIC(ConfigObject):
505 """Config object representing a network card."""
506 __slots__ = ["mac", "ip", "network", "nicparams", "netinfo"]
507
508 @classmethod
509 def CheckParameterSyntax(cls, nicparams):
510 """Check the given parameters for validity.
511
512 @type nicparams: dict
513 @param nicparams: dictionary with parameter names/value
514 @raise errors.ConfigurationError: when a parameter is not valid
515
516 """
517 if (nicparams[constants.NIC_MODE] not in constants.NIC_VALID_MODES and
518 nicparams[constants.NIC_MODE] != constants.VALUE_AUTO):
519 err = "Invalid nic mode: %s" % nicparams[constants.NIC_MODE]
520 raise errors.ConfigurationError(err)
521
522 if (nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED and
523 not nicparams[constants.NIC_LINK]):
524 err = "Missing bridged nic link"
525 raise errors.ConfigurationError(err)
526
527
528 class Disk(ConfigObject):
529 """Config object representing a block device."""
530 __slots__ = ["dev_type", "logical_id", "physical_id",
531 "children", "iv_name", "size", "mode", "params"]
532
533 def CreateOnSecondary(self):
534 """Test if this device needs to be created on a secondary node."""
535 return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)
536
537 def AssembleOnSecondary(self):
538 """Test if this device needs to be assembled on a secondary node."""
539 return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)
540
541 def OpenOnSecondary(self):
542 """Test if this device needs to be opened on a secondary node."""
543 return self.dev_type in (constants.LD_LV,)
544
545 def StaticDevPath(self):
546 """Return the device path if this device type has a static one.
547
548 Some devices (LVM for example) always live at the same /dev/ path,
549 irrespective of their status. For such devices, we return this
550 path, for others we return None.
551
552 @warning: The path returned is not a normalized pathname; callers
553 should check that it is a valid path.
554
555 """
556 if self.dev_type == constants.LD_LV:
557 return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
558 elif self.dev_type == constants.LD_BLOCKDEV:
559 return self.logical_id[1]
560 elif self.dev_type == constants.LD_RBD:
561 return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
562 return None
563
564 def ChildrenNeeded(self):
565 """Compute the needed number of children for activation.
566
567 This method will return either -1 (all children) or a non-negative
568 number denoting the minimum number of children needed for
569 activation (only mirrored devices will usually return >=0).
570
571 Currently, only DRBD8 supports diskless activation (therefore we
572 return 0), for all other we keep the previous semantics and return
573 -1.
574
575 """
576 if self.dev_type == constants.LD_DRBD8:
577 return 0
578 return -1
579
580 def IsBasedOnDiskType(self, dev_type):
581 """Check if the disk or its children are based on the given type.
582
583 @type dev_type: L{constants.LDS_BLOCK}
584 @param dev_type: the type to look for
585 @rtype: boolean
586 @return: boolean indicating if a device of the given type was found or not
587
588 """
589 if self.children:
590 for child in self.children:
591 if child.IsBasedOnDiskType(dev_type):
592 return True
593 return self.dev_type == dev_type
594
595 def GetNodes(self, node):
596 """This function returns the nodes this device lives on.
597
598 Given the node on which the parent of the device lives (or, in
599 case of a top-level device, the primary node of the device's
600 instance), this function will return a list of nodes on which this
601 device needs to (or can) be assembled.
602
603 """
604 if self.dev_type in [constants.LD_LV, constants.LD_FILE,
605 constants.LD_BLOCKDEV, constants.LD_RBD]:
606 result = [node]
607 elif self.dev_type in constants.LDS_DRBD:
608 result = [self.logical_id[0], self.logical_id[1]]
609 if node not in result:
610 raise errors.ConfigurationError("DRBD device passed unknown node")
611 else:
612 raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
613 return result
614
615 def ComputeNodeTree(self, parent_node):
616 """Compute the node/disk tree for this disk and its children.
617
618 This method, given the node on which the parent disk lives, will
619 return the list of all (node, disk) pairs which describe the disk
620 tree in the most compact way. For example, a drbd/lvm stack
621 will be returned as (primary_node, drbd) and (secondary_node, drbd)
622 which represents all the top-level devices on the nodes.
623
624 """
625 my_nodes = self.GetNodes(parent_node)
626 result = [(node, self) for node in my_nodes]
627 if not self.children:
628 # leaf device
629 return result
630 for node in my_nodes:
631 for child in self.children:
632 child_result = child.ComputeNodeTree(node)
633 if len(child_result) == 1:
634 # child (and all its descendants) is simple, doesn't split
635 # over multiple hosts, so we don't need to describe it, our
636 # own entry for this node describes it completely
637 continue
638 else:
639 # check if child nodes differ from my nodes; note that
640 # subdisk can differ from the child itself, and be instead
641 # one of its descendants
642 for subnode, subdisk in child_result:
643 if subnode not in my_nodes:
644 result.append((subnode, subdisk))
645 # otherwise child is under our own node, so we ignore this
646 # entry (but probably the other results in the list will
647 # be different)
648 return result
649
650 def ComputeGrowth(self, amount):
651 """Compute the per-VG growth requirements.
652
653 This only works for VG-based disks.
654
655 @type amount: integer
656 @param amount: the desired increase in (user-visible) disk space
657 @rtype: dict
658 @return: a dictionary of volume-groups and the required size
659
660 """
661 if self.dev_type == constants.LD_LV:
662 return {self.logical_id[0]: amount}
663 elif self.dev_type == constants.LD_DRBD8:
664 if self.children:
665 return self.children[0].ComputeGrowth(amount)
666 else:
667 return {}
668 else:
669 # Other disk types do not require VG space
670 return {}
671
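# Illustrative results (volume group and size are made up): a plain LV disk
# with logical_id ("xenvg", "data-disk0") returns {"xenvg": 1024} for
# ComputeGrowth(1024); a DRBD8 disk delegates to its data child; file- and
# RBD-based disks return {} since they need no volume-group space.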
672 def RecordGrow(self, amount):
673 """Update the size of this disk after growth.
674
675 This method recurses over the disk's children and updates their
676 size correspondingly. The method needs to be kept in sync with the
677 actual algorithms from bdev.
678
679 """
680 if self.dev_type in (constants.LD_LV, constants.LD_FILE,
681 constants.LD_RBD):
682 self.size += amount
683 elif self.dev_type == constants.LD_DRBD8:
684 if self.children:
685 self.children[0].RecordGrow(amount)
686 self.size += amount
687 else:
688 raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
689 " disk type %s" % self.dev_type)
690
691 def Update(self, size=None, mode=None):
692 """Apply changes to size and mode.
693
694 """
695 if self.dev_type == constants.LD_DRBD8:
696 if self.children:
697 self.children[0].Update(size=size, mode=mode)
698 else:
699 assert not self.children
700
701 if size is not None:
702 self.size = size
703 if mode is not None:
704 self.mode = mode
705
706 def UnsetSize(self):
707 """Sets recursively the size to zero for the disk and its children.
708
709 """
710 if self.children:
711 for child in self.children:
712 child.UnsetSize()
713 self.size = 0
714
715 def SetPhysicalID(self, target_node, nodes_ip):
716 """Convert the logical ID to the physical ID.
717
718 This is used only for drbd, which needs ip/port configuration.
719
720 The routine also descends into and updates its children, because
721 this helps when only the top device is passed to the remote
722 node.
723
724 Arguments:
725 - target_node: the node we wish to configure for
726 - nodes_ip: a mapping of node name to ip
727
728 The target_node must exist in nodes_ip, and must be one of the
729 nodes in the logical ID for each of the DRBD devices encountered
730 in the disk tree.
731
732 """
733 if self.children:
734 for child in self.children:
735 child.SetPhysicalID(target_node, nodes_ip)
736
737 if self.logical_id is None and self.physical_id is not None:
738 return
739 if self.dev_type in constants.LDS_DRBD:
740 pnode, snode, port, pminor, sminor, secret = self.logical_id
741 if target_node not in (pnode, snode):
742 raise errors.ConfigurationError("DRBD device not knowing node %s" %
743 target_node)
744 pnode_ip = nodes_ip.get(pnode, None)
745 snode_ip = nodes_ip.get(snode, None)
746 if pnode_ip is None or snode_ip is None:
747 raise errors.ConfigurationError("Can't find primary or secondary node"
748 " for %s" % str(self))
749 p_data = (pnode_ip, port)
750 s_data = (snode_ip, port)
751 if pnode == target_node:
752 self.physical_id = p_data + s_data + (pminor, secret)
753 else: # it must be secondary, we tested above
754 self.physical_id = s_data + p_data + (sminor, secret)
755 else:
756 self.physical_id = self.logical_id
757 return
758
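# Illustrative conversion (hypothetical names, port and addresses): a DRBD
# logical_id of ("node1", "node2", 11000, 0, 1, "secret") configured for
# target_node "node1" with nodes_ip = {"node1": "192.0.2.1",
# "node2": "192.0.2.2"} results in the physical_id
# ("192.0.2.1", 11000, "192.0.2.2", 11000, 0, "secret").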
759 def ToDict(self):
760 """Disk-specific conversion to standard python types.
761
762 This replaces the children lists of objects with lists of
763 standard python types.
764
765 """
766 bo = super(Disk, self).ToDict()
767
768 for attr in ("children",):
769 alist = bo.get(attr, None)
770 if alist:
771 bo[attr] = self._ContainerToDicts(alist)
772 return bo
773
774 @classmethod
775 def FromDict(cls, val):
776 """Custom function for Disks
777
778 """
779 obj = super(Disk, cls).FromDict(val)
780 if obj.children:
781 obj.children = cls._ContainerFromDicts(obj.children, list, Disk)
782 if obj.logical_id and isinstance(obj.logical_id, list):
783 obj.logical_id = tuple(obj.logical_id)
784 if obj.physical_id and isinstance(obj.physical_id, list):
785 obj.physical_id = tuple(obj.physical_id)
786 if obj.dev_type in constants.LDS_DRBD:
787 # we need a tuple of length six here
788 if len(obj.logical_id) < 6:
789 obj.logical_id += (None,) * (6 - len(obj.logical_id))
790 return obj
791
792 def __str__(self):
793 """Custom str() formatter for disks.
794
795 """
796 if self.dev_type == constants.LD_LV:
797 val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
798 elif self.dev_type in constants.LDS_DRBD:
799 node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
800 val = "<DRBD8("
801 if self.physical_id is None:
802 phy = "unconfigured"
803 else:
804 phy = ("configured as %s:%s %s:%s" %
805 (self.physical_id[0], self.physical_id[1],
806 self.physical_id[2], self.physical_id[3]))
807
808 val += ("hosts=%s/%d-%s/%d, port=%s, %s, " %
809 (node_a, minor_a, node_b, minor_b, port, phy))
810 if self.children and self.children.count(None) == 0:
811 val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
812 else:
813 val += "no local storage"
814 else:
815 val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
816 (self.dev_type, self.logical_id, self.physical_id, self.children))
817 if self.iv_name is None:
818 val += ", not visible"
819 else:
820 val += ", visible as /dev/%s" % self.iv_name
821 if isinstance(self.size, int):
822 val += ", size=%dm)>" % self.size
823 else:
824 val += ", size='%s')>" % (self.size,)
825 return val
826
827 def Verify(self):
828 """Checks that this disk is correctly configured.
829
830 """
831 all_errors = []
832 if self.mode not in constants.DISK_ACCESS_SET:
833 all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
834 return all_errors
835
836 def UpgradeConfig(self):
837 """Fill defaults for missing configuration values.
838
839 """
840 if self.children:
841 for child in self.children:
842 child.UpgradeConfig()
843
844 # FIXME: Make this configurable in Ganeti 2.7
845 self.params = {}
846 # add here config upgrade for this disk
847
848 @staticmethod
849 def ComputeLDParams(disk_template, disk_params):
850 """Computes Logical Disk parameters from Disk Template parameters.
851
852 @type disk_template: string
853 @param disk_template: disk template, one of L{constants.DISK_TEMPLATES}
854 @type disk_params: dict
855 @param disk_params: disk template parameters;
856 dict(template_name -> parameters)
857 @rtype: list(dict)
858 @return: a list of dicts, one for each node of the disk hierarchy. Each dict
859 contains the LD parameters of the node. The tree is flattened in-order.
860
861 """
862 if disk_template not in constants.DISK_TEMPLATES:
863 raise errors.ProgrammerError("Unknown disk template %s" % disk_template)
864
865 assert disk_template in disk_params
866
867 result = list()
868 dt_params = disk_params[disk_template]
869 if disk_template == constants.DT_DRBD8:
870 result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_DRBD8], {
871 constants.LDP_RESYNC_RATE: dt_params[constants.DRBD_RESYNC_RATE],
872 constants.LDP_BARRIERS: dt_params[constants.DRBD_DISK_BARRIERS],
873 constants.LDP_NO_META_FLUSH: dt_params[constants.DRBD_META_BARRIERS],
874 constants.LDP_DEFAULT_METAVG: dt_params[constants.DRBD_DEFAULT_METAVG],
875 constants.LDP_DISK_CUSTOM: dt_params[constants.DRBD_DISK_CUSTOM],
876 constants.LDP_NET_CUSTOM: dt_params[constants.DRBD_NET_CUSTOM],
877 constants.LDP_DYNAMIC_RESYNC: dt_params[constants.DRBD_DYNAMIC_RESYNC],
878 constants.LDP_PLAN_AHEAD: dt_params[constants.DRBD_PLAN_AHEAD],
879 constants.LDP_FILL_TARGET: dt_params[constants.DRBD_FILL_TARGET],
880 constants.LDP_DELAY_TARGET: dt_params[constants.DRBD_DELAY_TARGET],
881 constants.LDP_MAX_RATE: dt_params[constants.DRBD_MAX_RATE],
882 constants.LDP_MIN_RATE: dt_params[constants.DRBD_MIN_RATE],
883 }))
884
885 # data LV
886 result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], {
887 constants.LDP_STRIPES: dt_params[constants.DRBD_DATA_STRIPES],
888 }))
889
890 # metadata LV
891 result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], {
892 constants.LDP_STRIPES: dt_params[constants.DRBD_META_STRIPES],
893 }))
894
895 elif disk_template in (constants.DT_FILE, constants.DT_SHARED_FILE):
896 result.append(constants.DISK_LD_DEFAULTS[constants.LD_FILE])
897
898 elif disk_template == constants.DT_PLAIN:
899 result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], {
900 constants.LDP_STRIPES: dt_params[constants.LV_STRIPES],
901 }))
902
903 elif disk_template == constants.DT_BLOCK:
904 result.append(constants.DISK_LD_DEFAULTS[constants.LD_BLOCKDEV])
905
906 elif disk_template == constants.DT_RBD:
907 result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_RBD], {
908 constants.LDP_POOL: dt_params[constants.RBD_POOL]
909 }))
910
911 return result
912
913
914 class InstancePolicy(ConfigObject):
915 """Config object representing instance policy limits dictionary.
916
917
918 Note that this object is not actually used in the config, it's just
919 used as a placeholder for a few functions.
920
921 """
922 @classmethod
923 def CheckParameterSyntax(cls, ipolicy, check_std):
924 """ Check the instance policy for validity.
925
926 """
927 for param in constants.ISPECS_PARAMETERS:
928 InstancePolicy.CheckISpecSyntax(ipolicy, param, check_std)
929 if constants.IPOLICY_DTS in ipolicy:
930 InstancePolicy.CheckDiskTemplates(ipolicy[constants.IPOLICY_DTS])
931 for key in constants.IPOLICY_PARAMETERS:
932 if key in ipolicy:
933 InstancePolicy.CheckParameter(key, ipolicy[key])
934 wrong_keys = frozenset(ipolicy.keys()) - constants.IPOLICY_ALL_KEYS
935 if wrong_keys:
936 raise errors.ConfigurationError("Invalid keys in ipolicy: %s" %
937 utils.CommaJoin(wrong_keys))
938
939 @classmethod
940 def CheckISpecSyntax(cls, ipolicy, name, check_std):
941 """Check the instance policy for validity on a given key.
942
943 We check if the instance policy makes sense for a given key, that is
944 if ipolicy[min][name] <= ipolicy[std][name] <= ipolicy[max][name].
945
946 @type ipolicy: dict
947 @param ipolicy: dictionary with min, max, std specs
948 @type name: string
949 @param name: what are the limits for
950 @type check_std: bool
951 @param check_std: Whether to check std value or just assume compliance
952 @raise errors.ConfigurationError: when the specs for the given name are not valid
953
954 """
955 min_v = ipolicy[constants.ISPECS_MIN].get(name, 0)
956
957 if check_std:
958 std_v = ipolicy[constants.ISPECS_STD].get(name, min_v)
959 std_msg = std_v
960 else:
961 std_v = min_v
962 std_msg = "-"
963
964 max_v = ipolicy[constants.ISPECS_MAX].get(name, std_v)
965 err = ("Invalid specification of min/max/std values for %s: %s/%s/%s" %
966 (name,
967 ipolicy[constants.ISPECS_MIN].get(name, "-"),
968 ipolicy[constants.ISPECS_MAX].get(name, "-"),
969 std_msg))
970 if min_v > std_v or std_v > max_v:
971 raise errors.ConfigurationError(err)
972
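# Illustrative check (hypothetical spec name and values): for an ipolicy
# where min/std/max for "memory-size" are 128/256/512, CheckISpecSyntax
# passes with check_std=True; a std value of 1024 would violate
# std <= max and raise errors.ConfigurationError.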
973 @classmethod
974 def CheckDiskTemplates(cls, disk_templates):
975 """Checks the disk templates for validity.
976
977 """
978 wrong = frozenset(disk_templates).difference(constants.DISK_TEMPLATES)
979 if wrong:
980 raise errors.ConfigurationError("Invalid disk template(s) %s" %
981 utils.CommaJoin(wrong))
982
983 @classmethod
984 def CheckParameter(cls, key, value):
985 """Checks a parameter.
986
987 Currently we expect all parameters to be float values.
988
989 """
990 try:
991 float(value)
992 except (TypeError, ValueError), err:
993 raise errors.ConfigurationError("Invalid value for key" " '%s':"
994 " '%s', error: %s" % (key, value, err))
995
996
997 class Instance(TaggableObject):
998 """Config object representing an instance."""
999 __slots__ = [
1000 "name",
1001 "primary_node",
1002 "os",
1003 "hypervisor",
1004 "hvparams",
1005 "beparams",
1006 "osparams",
1007 "admin_state",
1008 "nics",
1009 "disks",
1010 "disk_template",
1011 "network_port",
1012 "serial_no",
1013 ] + _TIMESTAMPS + _UUID
1014
1015 def _ComputeSecondaryNodes(self):
1016 """Compute the list of secondary nodes.
1017
1018 This is a simple wrapper over _ComputeAllNodes.
1019
1020 """
1021 all_nodes = set(self._ComputeAllNodes())
1022 all_nodes.discard(self.primary_node)
1023 return tuple(all_nodes)
1024
1025 secondary_nodes = property(_ComputeSecondaryNodes, None, None,
1026 "List of secondary nodes")
1027
1028 def _ComputeAllNodes(self):
1029 """Compute the list of all nodes.
1030
1031 Since the data is already there (in the drbd disks), keeping it as
1032 a separate normal attribute is redundant and if not properly
1033 synchronised can cause problems. Thus it's better to compute it
1034 dynamically.
1035
1036 """
1037 def _Helper(nodes, device):
1038 """Recursively computes nodes given a top device."""
1039 if device.dev_type in constants.LDS_DRBD:
1040 nodea, nodeb = device.logical_id[:2]
1041 nodes.add(nodea)
1042 nodes.add(nodeb)
1043 if device.children:
1044 for child in device.children:
1045 _Helper(nodes, child)
1046
1047 all_nodes = set()
1048 all_nodes.add(self.primary_node)
1049 for device in self.disks:
1050 _Helper(all_nodes, device)
1051 return tuple(all_nodes)
1052
1053 all_nodes = property(_ComputeAllNodes, None, None,
1054 "List of all nodes of the instance")
1055
1056 def MapLVsByNode(self, lvmap=None, devs=None, node=None):
1057 """Provide a mapping of nodes to LVs this instance owns.
1058
1059 This function figures out what logical volumes should belong on
1060 which nodes, recursing through a device tree.
1061
1062 @param lvmap: optional dictionary to receive the
1063 'node' : ['lv', ...] data.
1064
1065 @return: None if lvmap arg is given, otherwise, a dictionary of
1066 the form { 'nodename' : ['volume1', 'volume2', ...], ... };
1067 volumeN is of the form "vg_name/lv_name", compatible with
1068 GetVolumeList()
1069
1070 """
1071 if node is None:
1072 node = self.primary_node
1073
1074 if lvmap is None:
1075 lvmap = {
1076 node: [],
1077 }
1078 ret = lvmap
1079 else:
1080 if not node in lvmap:
1081 lvmap[node] = []
1082 ret = None
1083
1084 if not devs:
1085 devs = self.disks
1086
1087 for dev in devs:
1088 if dev.dev_type == constants.LD_LV:
1089 lvmap[node].append(dev.logical_id[0] + "/" + dev.logical_id[1])
1090
1091 elif dev.dev_type in constants.LDS_DRBD:
1092 if dev.children:
1093 self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
1094 self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])
1095
1096 elif dev.children:
1097 self.MapLVsByNode(lvmap, dev.children, node)
1098
1099 return ret
1100
1101 def FindDisk(self, idx):
1102 """Find a disk given having a specified index.
1103
1104 This is just a wrapper that does validation of the index.
1105
1106 @type idx: int
1107 @param idx: the disk index
1108 @rtype: L{Disk}
1109 @return: the corresponding disk
1110 @raise errors.OpPrereqError: when the given index is not valid
1111
1112 """
1113 try:
1114 idx = int(idx)
1115 return self.disks[idx]
1116 except (TypeError, ValueError), err:
1117 raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err),
1118 errors.ECODE_INVAL)
1119 except IndexError:
1120 raise errors.OpPrereqError("Invalid disk index: %d (instance has disks"
1121 " 0 to %d)" % (idx, len(self.disks) - 1),
1122 errors.ECODE_INVAL)
1123
1124 def ToDict(self):
1125 """Instance-specific conversion to standard python types.
1126
1127 This replaces the children lists of objects with lists of standard
1128 python types.
1129
1130 """
1131 bo = super(Instance, self).ToDict()
1132
1133 for attr in "nics", "disks":
1134 alist = bo.get(attr, None)
1135 if alist:
1136 nlist = self._ContainerToDicts(alist)
1137 else:
1138 nlist = []
1139 bo[attr] = nlist
1140 return bo
1141
1142 @classmethod
1143 def FromDict(cls, val):
1144 """Custom function for instances.
1145
1146 """
1147 if "admin_state" not in val:
1148 if val.get("admin_up", False):
1149 val["admin_state"] = constants.ADMINST_UP
1150 else:
1151 val["admin_state"] = constants.ADMINST_DOWN
1152 if "admin_up" in val:
1153 del val["admin_up"]
1154 obj = super(Instance, cls).FromDict(val)
1155 obj.nics = cls._ContainerFromDicts(obj.nics, list, NIC)
1156 obj.disks = cls._ContainerFromDicts(obj.disks, list, Disk)
1157 return obj
1158
1159 def UpgradeConfig(self):
1160 """Fill defaults for missing configuration values.
1161
1162 """
1163 for nic in self.nics:
1164 nic.UpgradeConfig()
1165 for disk in self.disks:
1166 disk.UpgradeConfig()
1167 if self.hvparams:
1168 for key in constants.HVC_GLOBALS:
1169 try:
1170 del self.hvparams[key]
1171 except KeyError:
1172 pass
1173 if self.osparams is None:
1174 self.osparams = {}
1175 UpgradeBeParams(self.beparams)
1176
1177
1178 class OS(ConfigObject):
1179 """Config object representing an operating system.
1180
1181 @type supported_parameters: list
1182 @ivar supported_parameters: a list of tuples, name and description,
1183 containing the parameters supported by this OS
1184
1185 @type VARIANT_DELIM: string
1186 @cvar VARIANT_DELIM: the variant delimiter
1187
1188 """
1189 __slots__ = [
1190 "name",
1191 "path",
1192 "api_versions",
1193 "create_script",
1194 "export_script",
1195 "import_script",
1196 "rename_script",
1197 "verify_script",
1198 "supported_variants",
1199 "supported_parameters",
1200 ]
1201
1202 VARIANT_DELIM = "+"
1203
1204 @classmethod
1205 def SplitNameVariant(cls, name):
1206 """Splits the name into the proper name and variant.
1207
1208 @param name: the OS (unprocessed) name
1209 @rtype: list
1210 @return: a list of two elements; if the original name didn't
1211 contain a variant, it's returned as an empty string
1212
1213 """
1214 nv = name.split(cls.VARIANT_DELIM, 1)
1215 if len(nv) == 1:
1216 nv.append("")
1217 return nv
1218
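# Illustrative splits: SplitNameVariant("debian+squeeze") returns
# ["debian", "squeeze"], while SplitNameVariant("debian") returns
# ["debian", ""] since no variant is present.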
1219 @classmethod
1220 def GetName(cls, name):
1221 """Returns the proper name of the os (without the variant).
1222
1223 @param name: the OS (unprocessed) name
1224
1225 """
1226 return cls.SplitNameVariant(name)[0]
1227
1228 @classmethod
1229 def GetVariant(cls, name):
1230 """Returns the variant the os (without the base name).
1231
1232 @param name: the OS (unprocessed) name
1233
1234 """
1235 return cls.SplitNameVariant(name)[1]
1236
1237
1238 class NodeHvState(ConfigObject):
1239 """Hypvervisor state on a node.
1240
1241 @ivar mem_total: Total amount of memory
1242 @ivar mem_node: Memory used by, or reserved for, the node itself (not always
1243 available)
1244 @ivar mem_hv: Memory used by hypervisor or lost due to instance allocation
1245 rounding
1246 @ivar mem_inst: Memory used by instances living on node
1247 @ivar cpu_total: Total node CPU core count
1248 @ivar cpu_node: Number of CPU cores reserved for the node itself
1249
1250 """
1251 __slots__ = [
1252 "mem_total",
1253 "mem_node",
1254 "mem_hv",
1255 "mem_inst",
1256 "cpu_total",
1257 "cpu_node",
1258 ] + _TIMESTAMPS
1259
1260
1261 class NodeDiskState(ConfigObject):
1262 """Disk state on a node.
1263
1264 """
1265 __slots__ = [
1266 "total",
1267 "reserved",
1268 "overhead",
1269 ] + _TIMESTAMPS
1270
1271
1272 class Node(TaggableObject):
1273 """Config object representing a node.
1274
1275 @ivar hv_state: Hypervisor state (e.g. number of CPUs)
1276 @ivar hv_state_static: Hypervisor state overridden by user
1277 @ivar disk_state: Disk state (e.g. free space)
1278 @ivar disk_state_static: Disk state overridden by user
1279
1280 """
1281 __slots__ = [
1282 "name",
1283 "primary_ip",
1284 "secondary_ip",
1285 "serial_no",
1286 "master_candidate",
1287 "offline",
1288 "drained",
1289 "group",
1290 "master_capable",
1291 "vm_capable",
1292 "ndparams",
1293 "powered",
1294 "hv_state",
1295 "hv_state_static",
1296 "disk_state",
1297 "disk_state_static",
1298 ] + _TIMESTAMPS + _UUID
1299
1300 def UpgradeConfig(self):
1301 """Fill defaults for missing configuration values.
1302
1303 """
1304 # pylint: disable=E0203
1305 # because these are "defined" via slots, not manually
1306 if self.master_capable is None:
1307 self.master_capable = True
1308
1309 if self.vm_capable is None:
1310 self.vm_capable = True
1311
1312 if self.ndparams is None:
1313 self.ndparams = {}
1314
1315 if self.powered is None:
1316 self.powered = True
1317
1318 def ToDict(self):
1319 """Custom function for serializing.
1320
1321 """
1322 data = super(Node, self).ToDict()
1323
1324 hv_state = data.get("hv_state", None)
1325 if hv_state is not None:
1326 data["hv_state"] = self._ContainerToDicts(hv_state)
1327
1328 disk_state = data.get("disk_state", None)
1329 if disk_state is not None:
1330 data["disk_state"] = \
1331 dict((key, self._ContainerToDicts(value))
1332 for (key, value) in disk_state.items())
1333
1334 return data
1335
1336 @classmethod
1337 def FromDict(cls, val):
1338 """Custom function for deserializing.
1339
1340 """
1341 obj = super(Node, cls).FromDict(val)
1342
1343 if obj.hv_state is not None:
1344 obj.hv_state = cls._ContainerFromDicts(obj.hv_state, dict, NodeHvState)
1345
1346 if obj.disk_state is not None:
1347 obj.disk_state = \
1348 dict((key, cls._ContainerFromDicts(value, dict, NodeDiskState))
1349 for (key, value) in obj.disk_state.items())
1350
1351 return obj
1352
1353
1354 class NodeGroup(TaggableObject):
1355 """Config object representing a node group."""
1356 __slots__ = [
1357 "name",
1358 "members",
1359 "ndparams",
1360 "diskparams",
1361 "ipolicy",
1362 "serial_no",
1363 "hv_state_static",
1364 "disk_state_static",
1365 "alloc_policy",
1366 "networks",
1367 ] + _TIMESTAMPS + _UUID
1368
1369 def ToDict(self):
1370 """Custom function for nodegroup.
1371
1372 This discards the members object, which gets recalculated and is only kept
1373 in memory.
1374
1375 """
1376 mydict = super(NodeGroup, self).ToDict()
1377 del mydict["members"]
1378 return mydict
1379
1380 @classmethod
1381 def FromDict(cls, val):
1382 """Custom function for nodegroup.
1383
1384 The members slot is initialized to an empty list, upon deserialization.
1385
1386 """
1387 obj = super(NodeGroup, cls).FromDict(val)
1388 obj.members = []
1389 return obj
1390
1391 def UpgradeConfig(self):
1392 """Fill defaults for missing configuration values.
1393
1394 """
1395 if self.ndparams is None:
1396 self.ndparams = {}
1397
1398 if self.serial_no is None:
1399 self.serial_no = 1
1400
1401 if self.alloc_policy is None:
1402 self.alloc_policy = constants.ALLOC_POLICY_PREFERRED
1403
1404 # We only update mtime, and not ctime, since we would not be able
1405 # to provide a correct value for creation time.
1406 if self.mtime is None:
1407 self.mtime = time.time()
1408
1409 if self.diskparams is None:
1410 self.diskparams = {}
1411 if self.ipolicy is None:
1412 self.ipolicy = MakeEmptyIPolicy()
1413
1414 if self.networks is None:
1415 self.networks = {}
1416
1417 def FillND(self, node):
1418 """Return filled out ndparams for L{objects.Node}
1419
1420 @type node: L{objects.Node}
1421 @param node: A Node object to fill
1422 @return: a copy of the node's ndparams with defaults filled
1423
1424 """
1425 return self.SimpleFillND(node.ndparams)
1426
1427 def SimpleFillND(self, ndparams):
1428 """Fill a given ndparams dict with defaults.
1429
1430 @type ndparams: dict
1431 @param ndparams: the dict to fill
1432 @rtype: dict
1433 @return: a copy of the passed in ndparams with missing keys filled
1434 from the node group defaults
1435
1436 """
1437 return FillDict(self.ndparams, ndparams)
1438
1439
1440 class Cluster(TaggableObject):
1441 """Config object representing the cluster."""
1442 __slots__ = [
1443 "serial_no",
1444 "rsahostkeypub",
1445 "highest_used_port",
1446 "tcpudp_port_pool",
1447 "mac_prefix",
1448 "volume_group_name",
1449 "reserved_lvs",
1450 "drbd_usermode_helper",
1451 "default_bridge",
1452 "default_hypervisor",
1453 "master_node",
1454 "master_ip",
1455 "master_netdev",
1456 "master_netmask",
1457 "use_external_mip_script",
1458 "cluster_name",
1459 "file_storage_dir",
1460 "shared_file_storage_dir",
1461 "enabled_hypervisors",
1462 "hvparams",
1463 "ipolicy",
1464 "os_hvp",
1465 "beparams",
1466 "osparams",
1467 "nicparams",
1468 "ndparams",
1469 "diskparams",
1470 "candidate_pool_size",
1471 "modify_etc_hosts",
1472 "modify_ssh_setup",
1473 "maintain_node_health",
1474 "uid_pool",
1475 "default_iallocator",
1476 "hidden_os",
1477 "blacklisted_os",
1478 "primary_ip_family",
1479 "prealloc_wipe_disks",
1480 "hv_state_static",
1481 "disk_state_static",
1482 ] + _TIMESTAMPS + _UUID
1483
1484 def UpgradeConfig(self):
1485 """Fill defaults for missing configuration values.
1486
1487 """
1488 # pylint: disable=E0203
1489 # because these are "defined" via slots, not manually
1490 if self.hvparams is None:
1491 self.hvparams = constants.HVC_DEFAULTS
1492 else:
1493 for hypervisor in self.hvparams:
1494 self.hvparams[hypervisor] = FillDict(
1495 constants.HVC_DEFAULTS[hypervisor], self.hvparams[hypervisor])
1496
1497 if self.os_hvp is None:
1498 self.os_hvp = {}
1499
1500 # osparams added before 2.2
1501 if self.osparams is None:
1502 self.osparams = {}
1503
1504 self.ndparams = UpgradeNDParams(self.ndparams)
1505
1506 self.beparams = UpgradeGroupedParams(self.beparams,
1507 constants.BEC_DEFAULTS)
1508 for beparams_group in self.beparams:
1509 UpgradeBeParams(self.beparams[beparams_group])
1510
1511 migrate_default_bridge = not self.nicparams
1512 self.nicparams = UpgradeGroupedParams(self.nicparams,
1513 constants.NICC_DEFAULTS)
1514 if migrate_default_bridge:
1515 self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
1516 self.default_bridge
1517
1518 if self.modify_etc_hosts is None:
1519 self.modify_etc_hosts = True
1520
1521 if self.modify_ssh_setup is None:
1522 self.modify_ssh_setup = True
1523
1524 # default_bridge is no longer used in 2.1. The slot is left there to
1525 # support auto-upgrading. It can be removed once we decide to deprecate
1526 # upgrading straight from 2.0.
1527 if self.default_bridge is not None:
1528 self.default_bridge = None
1529
1530 # default_hypervisor is just the first enabled one in 2.1. This slot and
1531 # code can be removed once upgrading straight from 2.0 is deprecated.
1532 if self.default_hypervisor is not None:
1533 self.enabled_hypervisors = ([self.default_hypervisor] +
1534 [hvname for hvname in self.enabled_hypervisors
1535 if hvname != self.default_hypervisor])
1536 self.default_hypervisor = None
1537
1538 # maintain_node_health added after 2.1.1
1539 if self.maintain_node_health is None:
1540 self.maintain_node_health = False
1541
1542 if self.uid_pool is None:
1543 self.uid_pool = []
1544
1545 if self.default_iallocator is None:
1546 self.default_iallocator = ""
1547
1548 # reserved_lvs added before 2.2
1549 if self.reserved_lvs is None:
1550 self.reserved_lvs = []
1551
1552 # hidden and blacklisted operating systems added before 2.2.1
1553 if self.hidden_os is None:
1554 self.hidden_os = []
1555
1556 if self.blacklisted_os is None:
1557 self.blacklisted_os = []
1558
1559 # primary_ip_family added before 2.3
1560 if self.primary_ip_family is None:
1561 self.primary_ip_family = AF_INET
1562
1563 if self.master_netmask is None:
1564 ipcls = netutils.IPAddress.GetClassFromIpFamily(self.primary_ip_family)
1565 self.master_netmask = ipcls.iplen
1566
1567 if self.prealloc_wipe_disks is None:
1568 self.prealloc_wipe_disks = False
1569
1570 # shared_file_storage_dir added before 2.5
1571 if self.shared_file_storage_dir is None:
1572 self.shared_file_storage_dir = ""
1573
1574 if self.use_external_mip_script is None:
1575 self.use_external_mip_script = False
1576
1577 if self.diskparams:
1578 self.diskparams = UpgradeDiskParams(self.diskparams)
1579 else:
1580 self.diskparams = constants.DISK_DT_DEFAULTS.copy()
1581
1582 # instance policy added before 2.6
1583 if self.ipolicy is None:
1584 self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, {})
1585 else:
1586 # we can either make sure to upgrade the ipolicy always, or only
1587 # do it in some corner cases (e.g. missing keys); note that this
1588 # will break any removal of keys from the ipolicy dict
1589 self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, self.ipolicy)
1590
1591 @property
1592 def primary_hypervisor(self):
1593 """The first hypervisor is the primary.
1594
1595 Useful, for example, for L{Node}'s hv/disk state.
1596
1597 """
1598 return self.enabled_hypervisors[0]
1599
1600 def ToDict(self):
1601 """Custom function for cluster.
1602
1603 """
1604 mydict = super(Cluster, self).ToDict()
1605 mydict["tcpudp_port_pool"] = list(self.tcpudp_port_pool)
1606 return mydict
1607
1608 @classmethod
1609 def FromDict(cls, val):
1610 """Custom function for cluster.
1611
1612 """
1613 obj = super(Cluster, cls).FromDict(val)
1614 if not isinstance(obj.tcpudp_port_pool, set):
1615 obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)
1616 return obj
1617
1618 def SimpleFillDP(self, diskparams):
1619 """Fill a given diskparams dict with cluster defaults.
1620
1621 @param diskparams: the per-disk-template parameters to fill
1622 @return: a dict with the cluster disk parameter defaults updated by the passed values
1623
1624 """
1625 return FillDiskParams(self.diskparams, diskparams)
1626
1627 def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
1628 """Get the default hypervisor parameters for the cluster.
1629
1630 @param hypervisor: the hypervisor name
1631 @param os_name: if specified, we'll also update the defaults for this OS
1632 @param skip_keys: if passed, list of keys not to use
1633 @return: the defaults dict
1634
1635 """
1636 if skip_keys is None:
1637 skip_keys = []
1638
1639 fill_stack = [self.hvparams.get(hypervisor, {})]
1640 if os_name is not None:
1641 os_hvp = self.os_hvp.get(os_name, {}).get(hypervisor, {})
1642 fill_stack.append(os_hvp)
1643
1644 ret_dict = {}
1645 for o_dict in fill_stack:
1646 ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys)
1647
1648 return ret_dict
1649
1650 def SimpleFillHV(self, hv_name, os_name, hvparams, skip_globals=False):
1651 """Fill a given hvparams dict with cluster defaults.
1652
1653 @type hv_name: string
1654 @param hv_name: the hypervisor to use
1655 @type os_name: string
1656 @param os_name: the OS to use for overriding the hypervisor defaults
1657 @type skip_globals: boolean
1658 @param skip_globals: if True, the global hypervisor parameters will
1659 not be filled
1660 @rtype: dict
1661 @return: a copy of the given hvparams with missing keys filled from
1662 the cluster defaults
1663
1664 """
1665 if skip_globals:
1666 skip_keys = constants.HVC_GLOBALS
1667 else:
1668 skip_keys = []
1669
1670 def_dict = self.GetHVDefaults(hv_name, os_name, skip_keys=skip_keys)
1671 return FillDict(def_dict, hvparams, skip_keys=skip_keys)
1672
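# Illustrative layering (hypervisor and parameter values are hypothetical):
#   >>> cluster.hvparams = {"kvm": {"kernel_path": "/vmlinuz", "acpi": True}}
#   >>> cluster.os_hvp = {"debian": {"kvm": {"acpi": False}}}
#   >>> cluster.SimpleFillHV("kvm", "debian", {"kernel_path": "/boot/vmlinuz"})
# yields {"kernel_path": "/boot/vmlinuz", "acpi": False}: instance-level
# values win over the per-OS overrides, which win over the cluster-wide
# hypervisor defaults.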
1673 def FillHV(self, instance, skip_globals=False):
1674 """Fill an instance's hvparams dict with cluster defaults.
1675
1676 @type instance: L{objects.Instance}
1677 @param instance: the instance parameter to fill
1678 @type skip_globals: boolean
1679 @param skip_globals: if True, the global hypervisor parameters will
1680 not be filled
1681 @rtype: dict
1682 @return: a copy of the instance's hvparams with missing keys filled from
1683 the cluster defaults
1684
1685 """
1686 return self.SimpleFillHV(instance.hypervisor, instance.os,
1687 instance.hvparams, skip_globals)
1688
1689 def SimpleFillBE(self, beparams):
1690 """Fill a given beparams dict with cluster defaults.
1691
1692 @type beparams: dict
1693 @param beparams: the dict to fill
1694 @rtype: dict
1695 @return: a copy of the passed in beparams with missing keys filled
1696 from the cluster defaults
1697
1698 """
1699 return FillDict(self.beparams.get(constants.PP_DEFAULT, {}), beparams)
1700
1701 def FillBE(self, instance):
1702 """Fill an instance's beparams dict with cluster defaults.
1703
1704 @type instance: L{objects.Instance}
1705 @param instance: the instance parameter to fill
1706 @rtype: dict
1707 @return: a copy of the instance's beparams with missing keys filled from
1708 the cluster defaults
1709
1710 """
1711 return self.SimpleFillBE(instance.beparams)
1712
1713 def SimpleFillNIC(self, nicparams):
1714 """Fill a given nicparams dict with cluster defaults.
1715
1716 @type nicparams: dict
1717 @param nicparams: the dict to fill
1718 @rtype: dict
1719 @return: a copy of the passed in nicparams with missing keys filled
1720 from the cluster defaults
1721
1722 """
1723 return FillDict(self.nicparams.get(constants.PP_DEFAULT, {}), nicparams)
1724
1725 def SimpleFillOS(self, os_name, os_params):
1726 """Fill an instance's osparams dict with cluster defaults.
1727
1728 @type os_name: string
1729 @param os_name: the OS name to use
1730 @type os_params: dict
1731 @param os_params: the dict to fill with default values
1732 @rtype: dict
1733 @return: a copy of the instance's osparams with missing keys filled from
1734 the cluster defaults
1735
1736 """
1737 name_only = os_name.split("+", 1)[0]
1738 # base OS
1739 result = self.osparams.get(name_only, {})
1740 # OS with variant
1741 result = FillDict(result, self.osparams.get(os_name, {}))
1742 # specified params
1743 return FillDict(result, os_params)
1744
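# Illustrative precedence (OS and parameter names are hypothetical): with
# cluster.osparams = {"debian": {"mirror": "a"}, "debian+squeeze": {"mirror": "b"}},
# SimpleFillOS("debian+squeeze", {}) yields {"mirror": "b"} and
# SimpleFillOS("debian+squeeze", {"mirror": "c"}) yields {"mirror": "c"}:
# explicit parameters override the variant defaults, which override the
# base-OS defaults.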
1745 @staticmethod
1746 def SimpleFillHvState(hv_state):
1747 """Fill an hv_state sub dict with cluster defaults.
1748
1749 """
1750 return FillDict(constants.HVST_DEFAULTS, hv_state)
1751
1752 @staticmethod
1753 def SimpleFillDiskState(disk_state):
1754 """Fill an disk_state sub dict with cluster defaults.
1755
1756 """
1757 return FillDict(constants.DS_DEFAULTS, disk_state)
1758
1759 def FillND(self, node, nodegroup):
1760 """Return filled out ndparams for L{objects.NodeGroup} and L{objects.Node}
1761
1762 @type node: L{objects.Node}
1763 @param node: A Node object to fill
1764 @type nodegroup: L{objects.NodeGroup}
1765 @param nodegroup: A NodeGroup object to fill
1766 @return: a copy of the node's ndparams with defaults filled
1767
1768 """
1769 return self.SimpleFillND(nodegroup.FillND(node))
1770
1771 def SimpleFillND(self, ndparams):
1772 """Fill a given ndparams dict with defaults.
1773
1774 @type ndparams: dict
1775 @param ndparams: the dict to fill
1776 @rtype: dict
1777 @return: a copy of the passed in ndparams with missing keys filled
1778 from the cluster defaults
1779
1780 """
1781 return FillDict(self.ndparams, ndparams)
1782
1783 def SimpleFillIPolicy(self, ipolicy):
1784 """ Fill instance policy dict with defaults.
1785
1786 @type ipolicy: dict
1787 @param ipolicy: the dict to fill
1788 @rtype: dict
1789 @return: a copy of passed ipolicy with missing keys filled from
1790 the cluster defaults
1791
1792 """
1793 return FillIPolicy(self.ipolicy, ipolicy)
1794
1795
1796 class BlockDevStatus(ConfigObject):
1797 """Config object representing the status of a block device."""
1798 __slots__ = [
1799 "dev_path",
1800 "major",
1801 "minor",
1802 "sync_percent",
1803 "estimated_time",
1804 "is_degraded",
1805 "ldisk_status",
1806 ]
1807
1808
1809 class ImportExportStatus(ConfigObject):
1810 """Config object representing the status of an import or export."""
1811 __slots__ = [
1812 "recent_output",
1813 "listen_port",
1814 "connected",
1815 "progress_mbytes",
1816 "progress_throughput",
1817 "progress_eta",
1818 "progress_percent",
1819 "exit_status",
1820 "error_message",
1821 ] + _TIMESTAMPS
1822
1823
1824 class ImportExportOptions(ConfigObject):
1825 """Options for import/export daemon
1826
1827 @ivar key_name: X509 key name (None for cluster certificate)
1828 @ivar ca_pem: Remote peer CA in PEM format (None for cluster certificate)
1829 @ivar compress: Compression method (one of L{constants.IEC_ALL})
1830 @ivar magic: Used to ensure the connection goes to the right disk
1831 @ivar ipv6: Whether to use IPv6
1832 @ivar connect_timeout: Number of seconds for establishing connection
1833
1834 """
1835 __slots__ = [
1836 "key_name",
1837 "ca_pem",
1838 "compress",
1839 "magic",
1840 "ipv6",
1841 "connect_timeout",
1842 ]
1843
1844
1845 class ConfdRequest(ConfigObject):
1846 """Object holding a confd request.
1847
1848 @ivar protocol: confd protocol version
1849 @ivar type: confd query type
1850 @ivar query: query request
1851 @ivar rsalt: requested reply salt
1852
1853 """
1854 __slots__ = [
1855 "protocol",
1856 "type",
1857 "query",
1858 "rsalt",
1859 ]
1860
1861
1862 class ConfdReply(ConfigObject):
1863 """Object holding a confd reply.
1864
1865 @ivar protocol: confd protocol version
1866 @ivar status: reply status code (ok, error)
1867 @ivar answer: confd query reply
1868 @ivar serial: configuration serial number
1869
1870 """
1871 __slots__ = [
1872 "protocol",
1873 "status",
1874 "answer",
1875 "serial",
1876 ]
1877
1878
1879 class QueryFieldDefinition(ConfigObject):
1880 """Object holding a query field definition.
1881
1882 @ivar name: Field name
1883 @ivar title: Human-readable title
1884 @ivar kind: Field type
1885 @ivar doc: Human-readable description
1886
1887 """
1888 __slots__ = [
1889 "name",
1890 "title",
1891 "kind",
1892 "doc",
1893 ]
1894
1895
1896 class _QueryResponseBase(ConfigObject):
1897 __slots__ = [
1898 "fields",
1899 ]
1900
1901 def ToDict(self):
1902 """Custom function for serializing.
1903
1904 """
1905 mydict = super(_QueryResponseBase, self).ToDict()
1906 mydict["fields"] = self._ContainerToDicts(mydict["fields"])
1907 return mydict
1908
1909 @classmethod
1910 def FromDict(cls, val):
1911 """Custom function for de-serializing.
1912
1913 """
1914 obj = super(_QueryResponseBase, cls).FromDict(val)
1915 obj.fields = cls._ContainerFromDicts(obj.fields, list, QueryFieldDefinition)
1916 return obj
1917
1918
1919 class QueryResponse(_QueryResponseBase):
1920 """Object holding the response to a query.
1921
1922 @ivar fields: List of L{QueryFieldDefinition} objects
1923 @ivar data: Requested data
1924
1925 """
1926 __slots__ = [
1927 "data",
1928 ]
1929
1930
1931 class QueryFieldsRequest(ConfigObject):
1932 """Object holding a request for querying available fields.
1933
1934 """
1935 __slots__ = [
1936 "what",
1937 "fields",
1938 ]
1939
1940
1941 class QueryFieldsResponse(_QueryResponseBase):
1942 """Object holding the response to a query for fields.
1943
1944 @ivar fields: List of L{QueryFieldDefinition} objects
1945
1946 """
1947 __slots__ = []
1948
1949
1950 class MigrationStatus(ConfigObject):
1951 """Object holding the status of a migration.
1952
1953 """
1954 __slots__ = [
1955 "status",
1956 "transferred_ram",
1957 "total_ram",
1958 ]
1959
1960
1961 class InstanceConsole(ConfigObject):
1962 """Object describing how to access the console of an instance.
1963
1964 """
1965 __slots__ = [
1966 "instance",
1967 "kind",
1968 "message",
1969 "host",
1970 "port",
1971 "user",
1972 "command",
1973 "display",
1974 ]
1975
1976 def Validate(self):
1977 """Validates contents of this object.
1978
1979 """
1980 assert self.kind in constants.CONS_ALL, "Unknown console type"
1981 assert self.instance, "Missing instance name"
1982 assert self.message or self.kind in [constants.CONS_SSH,
1983 constants.CONS_SPICE,
1984 constants.CONS_VNC]
1985 assert self.host or self.kind == constants.CONS_MESSAGE
1986 assert self.port or self.kind in [constants.CONS_MESSAGE,
1987 constants.CONS_SSH]
1988 assert self.user or self.kind in [constants.CONS_MESSAGE,
1989 constants.CONS_SPICE,
1990 constants.CONS_VNC]
1991 assert self.command or self.kind in [constants.CONS_MESSAGE,
1992 constants.CONS_SPICE,
1993 constants.CONS_VNC]
1994 assert self.display or self.kind in [constants.CONS_MESSAGE,
1995 constants.CONS_SPICE,
1996 constants.CONS_SSH]
1997 return True
1998
1999
2000 class Network(TaggableObject):
2001 """Object representing a network definition for ganeti.
2002
2003 """
2004 __slots__ = [
2005 "name",
2006 "serial_no",
2007 "network_type",
2008 "mac_prefix",
2009 "family",
2010 "network",
2011 "network6",
2012 "gateway",
2013 "gateway6",
2014 "size",
2015 "reservations",
2016 "ext_reservations",
2017 ] + _TIMESTAMPS + _UUID
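# Since Network now derives from TaggableObject (the change introduced by
# this revision), it carries the inherited "tags" slot and the
# AddTag/RemoveTag/GetTags helpers; an illustrative round trip (field
# values are examples only):
#   >>> net = Network(name="net1", network="10.0.0.0/24")
#   >>> net.AddTag("dmz")
#   >>> Network.FromDict(net.ToDict()).GetTags()
#   set(['dmz'])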
2018
2019
2020 class SerializableConfigParser(ConfigParser.SafeConfigParser):
2021 """Simple wrapper over ConfigParse that allows serialization.
2022
2023 This class is basically ConfigParser.SafeConfigParser with two
2024 additional methods that allow it to serialize/unserialize to/from a
2025 buffer.
2026
2027 """
2028 def Dumps(self):
2029 """Dump this instance and return the string representation."""
2030 buf = StringIO()
2031 self.write(buf)
2032 return buf.getvalue()
2033
2034 @classmethod
2035 def Loads(cls, data):
2036 """Load data from a string."""
2037 buf = StringIO(data)
2038 cfp = cls()
2039 cfp.readfp(buf)
2040 return cfp
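# Illustrative round trip (section and option names are examples only):
#   >>> cfp = SerializableConfigParser()
#   >>> cfp.add_section("node")
#   >>> cfp.set("node", "name", "node1.example.com")
#   >>> SerializableConfigParser.Loads(cfp.Dumps()).get("node", "name")
#   'node1.example.com'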