[ganeti-github.git] / lib / objects.py
1 #
2 #
3
4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
5 #
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
10 #
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
15 #
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 # 02110-1301, USA.
20
21
22 """Transportable objects for Ganeti.
23
24 This module provides small, mostly data-only objects which are safe to
25 pass to and from external parties.
26
27 """
28
29 # pylint: disable=E0203,W0201,R0902
30
31 # E0203: Access to member %r before its definition, since we use
32 # objects.py which doesn't explicitly initialise its members
33
34 # W0201: Attribute '%s' defined outside __init__
35
36 # R0902: Allow instances of these objects to have more than 20 attributes
37
38 import ConfigParser
39 import re
40 import copy
41 import logging
42 import time
43 from cStringIO import StringIO
44
45 from ganeti import errors
46 from ganeti import constants
47 from ganeti import netutils
48 from ganeti import outils
49 from ganeti import utils
50
51 from socket import AF_INET
52
53
54 __all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
55 "OS", "Node", "NodeGroup", "Cluster", "FillDict", "Network"]
56
57 _TIMESTAMPS = ["ctime", "mtime"]
58 _UUID = ["uuid"]
59
60
61 def FillDict(defaults_dict, custom_dict, skip_keys=None):
62 """Basic function to apply settings on top a default dict.
63
64 @type defaults_dict: dict
65 @param defaults_dict: dictionary holding the default values
66 @type custom_dict: dict
67 @param custom_dict: dictionary holding customized values
68 @type skip_keys: list
69 @param skip_keys: which keys not to fill
70 @rtype: dict
71 @return: dict with the 'full' values
72
73 """
74 ret_dict = copy.deepcopy(defaults_dict)
75 ret_dict.update(custom_dict)
76 if skip_keys:
77 for k in skip_keys:
78 try:
79 del ret_dict[k]
80 except KeyError:
81 pass
82 return ret_dict
83
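# Illustrative sketch, not part of the original module: how FillDict layers
# a custom dict over defaults and honours skip_keys. The keys and values
# below are made up purely for demonstration.
def _ExampleFillDict():
  defaults = {"mode": "bridged", "link": "br0", "vlan": ""}
  custom = {"link": "br1"}
  filled = FillDict(defaults, custom, skip_keys=["vlan"])
  # customized values win, missing keys come from the defaults and
  # skipped keys are dropped from the result
  assert filled == {"mode": "bridged", "link": "br1"}
  return filled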
84
85 def FillIPolicy(default_ipolicy, custom_ipolicy):
86 """Fills an instance policy with defaults.
87
88 """
89 assert frozenset(default_ipolicy.keys()) == constants.IPOLICY_ALL_KEYS
90 ret_dict = copy.deepcopy(custom_ipolicy)
91 for key in default_ipolicy:
92 if key not in ret_dict:
93 ret_dict[key] = copy.deepcopy(default_ipolicy[key])
94 elif key == constants.ISPECS_STD:
95 ret_dict[key] = FillDict(default_ipolicy[key], ret_dict[key])
96 return ret_dict
97
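# Illustrative sketch, not part of the original module: filling a partial
# instance policy from the cluster-wide defaults. It assumes
# constants.IPOLICY_DEFAULTS satisfies the key assertion above (it is used
# that way later in this module); the custom policy is hypothetical.
def _ExampleFillIPolicy():
  custom = {constants.IPOLICY_DTS: [constants.DT_PLAIN]}
  filled = FillIPolicy(constants.IPOLICY_DEFAULTS, custom)
  # explicitly given keys are kept, all other keys are deep-copied from
  # the defaults (ISPECS_STD is merged key by key)
  assert filled[constants.IPOLICY_DTS] == [constants.DT_PLAIN]
  return filled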
98
99 def FillDiskParams(default_dparams, custom_dparams, skip_keys=None):
100 """Fills the disk parameter defaults.
101
102 @see: L{FillDict} for parameters and return value
103
104 """
105 assert frozenset(default_dparams.keys()) == constants.DISK_TEMPLATES
106
107 return dict((dt, FillDict(default_dparams[dt], custom_dparams.get(dt, {}),
108 skip_keys=skip_keys))
109 for dt in constants.DISK_TEMPLATES)
110
111
112 def UpgradeGroupedParams(target, defaults):
113 """Update all groups for the target parameter.
114
115 @type target: dict of dicts
116 @param target: {group: {parameter: value}}
117 @type defaults: dict
118 @param defaults: default parameter values
119
120 """
121 if target is None:
122 target = {constants.PP_DEFAULT: defaults}
123 else:
124 for group in target:
125 target[group] = FillDict(defaults, target[group])
126 return target
127
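# Illustrative sketch, not part of the original module: upgrading grouped
# parameters. A missing target becomes a single default group; existing
# groups only get their missing keys filled. Parameter names are made up.
def _ExampleUpgradeGroupedParams():
  defaults = {"alpha": 1, "beta": 2}
  created = UpgradeGroupedParams(None, defaults)
  assert created == {constants.PP_DEFAULT: defaults}
  filled = UpgradeGroupedParams({"mygroup": {"beta": 7}}, defaults)
  assert filled["mygroup"] == {"alpha": 1, "beta": 7}
  return created, filled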
128
129 def UpgradeBeParams(target):
130 """Update the be parameters dict to the new format.
131
132 @type target: dict
133 @param target: "be" parameters dict
134
135 """
136 if constants.BE_MEMORY in target:
137 memory = target[constants.BE_MEMORY]
138 target[constants.BE_MAXMEM] = memory
139 target[constants.BE_MINMEM] = memory
140 del target[constants.BE_MEMORY]
141
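# Illustrative sketch, not part of the original module: the backend
# parameter upgrade above splits the legacy single memory setting into the
# newer maxmem/minmem pair. The value 512 is arbitrary.
def _ExampleUpgradeBeParams():
  beparams = {constants.BE_MEMORY: 512}
  UpgradeBeParams(beparams)  # modifies the dict in place
  assert constants.BE_MEMORY not in beparams
  assert beparams[constants.BE_MAXMEM] == 512
  assert beparams[constants.BE_MINMEM] == 512
  return beparams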
142
143 def UpgradeDiskParams(diskparams):
144 """Upgrade the disk parameters.
145
146 @type diskparams: dict
147 @param diskparams: disk parameters to upgrade
148 @rtype: dict
149 @return: the upgraded disk parameters dict
150
151 """
152 if not diskparams:
153 result = {}
154 else:
155 result = FillDiskParams(constants.DISK_DT_DEFAULTS, diskparams)
156
157 return result
158
159
160 def UpgradeNDParams(ndparams):
161 """Upgrade ndparams structure.
162
163 @type ndparams: dict
164 @param ndparams: node parameters to upgrade
165 @rtype: dict
166 @return: the upgraded node parameters dict
167
168 """
169 if ndparams is None:
170 ndparams = {}
171
172 if (constants.ND_OOB_PROGRAM in ndparams and
173 ndparams[constants.ND_OOB_PROGRAM] is None):
174 # will be reset by the line below
175 del ndparams[constants.ND_OOB_PROGRAM]
176 return FillDict(constants.NDC_DEFAULTS, ndparams)
177
178
179 def MakeEmptyIPolicy():
180 """Create empty IPolicy dictionary.
181
182 """
183 return {}
184
185
186 class ConfigObject(outils.ValidatedSlots):
187 """A generic config object.
188
189 It has the following properties:
190
191 - provides somewhat safe recursive unpickling and pickling for its classes
192 - unset attributes which are defined in slots are always returned
193 as None instead of raising an error
194
195 Classes derived from this must always declare __slots__ (we use many
196 config objects and the memory reduction is useful)
197
198 """
199 __slots__ = []
200
201 def __getattr__(self, name):
202 if name not in self.GetAllSlots():
203 raise AttributeError("Invalid object attribute %s.%s" %
204 (type(self).__name__, name))
205 return None
206
207 def __setstate__(self, state):
208 slots = self.GetAllSlots()
209 for name in state:
210 if name in slots:
211 setattr(self, name, state[name])
212
213 def Validate(self):
214 """Validates the slots.
215
216 """
217
218 def ToDict(self):
219 """Convert to a dict holding only standard python types.
220
221 The generic routine just dumps all of this object's attributes in
222 a dict. It does not work if the class has children who are
223 ConfigObjects themselves (e.g. the nics list in an Instance), in
224 which case the object should subclass the function in order to
225 make sure all objects returned are only standard python types.
226
227 """
228 result = {}
229 for name in self.GetAllSlots():
230 value = getattr(self, name, None)
231 if value is not None:
232 result[name] = value
233 return result
234
235 __getstate__ = ToDict
236
237 @classmethod
238 def FromDict(cls, val):
239 """Create an object from a dictionary.
240
241 This generic routine takes a dict, instantiates a new instance of
242 the given class, and sets attributes based on the dict content.
243
244 As for `ToDict`, this does not work if the class has children
245 who are ConfigObjects themselves (e.g. the nics list in an
246 Instance), in which case the object should subclass the function
247 and alter the objects.
248
249 """
250 if not isinstance(val, dict):
251 raise errors.ConfigurationError("Invalid object passed to FromDict:"
252 " expected dict, got %s" % type(val))
253 val_str = dict([(str(k), v) for k, v in val.iteritems()])
254 obj = cls(**val_str) # pylint: disable=W0142
255 return obj
256
257 def Copy(self):
258 """Makes a deep copy of the current object and its children.
259
260 """
261 dict_form = self.ToDict()
262 clone_obj = self.__class__.FromDict(dict_form)
263 return clone_obj
264
265 def __repr__(self):
266 """Implement __repr__ for ConfigObjects."""
267 return repr(self.ToDict())
268
269 def UpgradeConfig(self):
270 """Fill defaults for missing configuration values.
271
272 This method will be called at configuration load time, and its
273 implementation will be object dependent.
274
275 """
276 pass
277
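# Illustrative sketch, not part of the original module: the generic
# ToDict/FromDict round trip provided by ConfigObject, shown on a
# hypothetical subclass. It relies on the keyword-argument constructor
# that FromDict itself uses (cls(**val_str)).
class _ExampleConfig(ConfigObject):
  __slots__ = ["address", "port"]


def _ExampleConfigRoundTrip():
  cfg = _ExampleConfig(address="192.0.2.1")
  assert cfg.port is None                   # unset slots read as None
  data = cfg.ToDict()
  assert data == {"address": "192.0.2.1"}   # None values are omitted
  clone = _ExampleConfig.FromDict(data)
  return clone.Copy()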
278
279 class TaggableObject(ConfigObject):
280 """An generic class supporting tags.
281
282 """
283 __slots__ = ["tags"]
284 VALID_TAG_RE = re.compile("^[\w.+*/:@-]+$")
285
286 @classmethod
287 def ValidateTag(cls, tag):
288 """Check if a tag is valid.
289
290 If the tag is invalid, an errors.TagError will be raised. The
291 function has no return value.
292
293 """
294 if not isinstance(tag, basestring):
295 raise errors.TagError("Invalid tag type (not a string)")
296 if len(tag) > constants.MAX_TAG_LEN:
297 raise errors.TagError("Tag too long (>%d characters)" %
298 constants.MAX_TAG_LEN)
299 if not tag:
300 raise errors.TagError("Tags cannot be empty")
301 if not cls.VALID_TAG_RE.match(tag):
302 raise errors.TagError("Tag contains invalid characters")
303
304 def GetTags(self):
305 """Return the tags list.
306
307 """
308 tags = getattr(self, "tags", None)
309 if tags is None:
310 tags = self.tags = set()
311 return tags
312
313 def AddTag(self, tag):
314 """Add a new tag.
315
316 """
317 self.ValidateTag(tag)
318 tags = self.GetTags()
319 if len(tags) >= constants.MAX_TAGS_PER_OBJ:
320 raise errors.TagError("Too many tags")
321 self.GetTags().add(tag)
322
323 def RemoveTag(self, tag):
324 """Remove a tag.
325
326 """
327 self.ValidateTag(tag)
328 tags = self.GetTags()
329 try:
330 tags.remove(tag)
331 except KeyError:
332 raise errors.TagError("Tag not found")
333
334 def ToDict(self):
335 """Taggable-object-specific conversion to standard python types.
336
337 This replaces the tags set with a list.
338
339 """
340 bo = super(TaggableObject, self).ToDict()
341
342 tags = bo.get("tags", None)
343 if isinstance(tags, set):
344 bo["tags"] = list(tags)
345 return bo
346
347 @classmethod
348 def FromDict(cls, val):
349 """Custom function for instances.
350
351 """
352 obj = super(TaggableObject, cls).FromDict(val)
353 if hasattr(obj, "tags") and isinstance(obj.tags, list):
354 obj.tags = set(obj.tags)
355 return obj
356
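# Illustrative sketch, not part of the original module: basic tag handling
# on a TaggableObject. Instantiating the class directly is done only for
# demonstration; in the configuration, tags live on objects such as
# Instance, Node, NodeGroup and Cluster.
def _ExampleTags():
  obj = TaggableObject()
  TaggableObject.ValidateTag("env:production")  # raises TagError if invalid
  obj.AddTag("env:production")
  obj.AddTag("tier-1")
  assert obj.GetTags() == set(["env:production", "tier-1"])
  obj.RemoveTag("tier-1")
  # serialization turns the tag set into a plain list
  assert obj.ToDict()["tags"] == ["env:production"]
  return obj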
357
358 class MasterNetworkParameters(ConfigObject):
359 """Network configuration parameters for the master
360
361 @ivar name: master name
362 @ivar ip: master IP
363 @ivar netmask: master netmask
364 @ivar netdev: master network device
365 @ivar ip_family: master IP family
366
367 """
368 __slots__ = [
369 "name",
370 "ip",
371 "netmask",
372 "netdev",
373 "ip_family",
374 ]
375
376
377 class ConfigData(ConfigObject):
378 """Top-level config object."""
379 __slots__ = [
380 "version",
381 "cluster",
382 "nodes",
383 "nodegroups",
384 "instances",
385 "networks",
386 "serial_no",
387 ] + _TIMESTAMPS
388
389 def ToDict(self):
390 """Custom function for top-level config data.
391
392 This just replaces the list of instances, nodes and the cluster
393 with standard python types.
394
395 """
396 mydict = super(ConfigData, self).ToDict()
397 mydict["cluster"] = mydict["cluster"].ToDict()
398 for key in "nodes", "instances", "nodegroups", "networks":
399 mydict[key] = outils.ContainerToDicts(mydict[key])
400
401 return mydict
402
403 @classmethod
404 def FromDict(cls, val):
405 """Custom function for top-level config data
406
407 """
408 obj = super(ConfigData, cls).FromDict(val)
409 obj.cluster = Cluster.FromDict(obj.cluster)
410 obj.nodes = outils.ContainerFromDicts(obj.nodes, dict, Node)
411 obj.instances = \
412 outils.ContainerFromDicts(obj.instances, dict, Instance)
413 obj.nodegroups = \
414 outils.ContainerFromDicts(obj.nodegroups, dict, NodeGroup)
415 obj.networks = outils.ContainerFromDicts(obj.networks, dict, Network)
416 return obj
417
418 def HasAnyDiskOfType(self, dev_type):
419 """Check if in there is at disk of the given type in the configuration.
420
421 @type dev_type: L{constants.LDS_BLOCK}
422 @param dev_type: the type to look for
423 @rtype: boolean
424 @return: boolean indicating if a disk of the given type was found or not
425
426 """
427 for instance in self.instances.values():
428 for disk in instance.disks:
429 if disk.IsBasedOnDiskType(dev_type):
430 return True
431 return False
432
433 def UpgradeConfig(self):
434 """Fill defaults for missing configuration values.
435
436 """
437 self.cluster.UpgradeConfig()
438 for node in self.nodes.values():
439 node.UpgradeConfig()
440 for instance in self.instances.values():
441 instance.UpgradeConfig()
442 if self.nodegroups is None:
443 self.nodegroups = {}
444 for nodegroup in self.nodegroups.values():
445 nodegroup.UpgradeConfig()
446 if self.cluster.drbd_usermode_helper is None:
447 # To decide if we set a helper let's check if at least one instance has
448 # a DRBD disk. This does not cover all the possible scenarios but it
449 # gives a good approximation.
450 if self.HasAnyDiskOfType(constants.LD_DRBD8):
451 self.cluster.drbd_usermode_helper = constants.DEFAULT_DRBD_HELPER
452 if self.networks is None:
453 self.networks = {}
454 for network in self.networks.values():
455 network.UpgradeConfig()
456 self._UpgradeEnabledDiskTemplates()
457
458 def _UpgradeEnabledDiskTemplates(self):
459 """Upgrade the cluster's enabled disk templates by inspecting the currently
460 enabled and/or used disk templates.
461
462 """
463 # enabled_disk_templates in the cluster config were introduced in 2.8.
464 # Remove this code once upgrading from earlier versions is deprecated.
465 if not self.cluster.enabled_disk_templates:
466 template_set = \
467 set([inst.disk_template for inst in self.instances.values()])
468 # Add drbd and plain, if lvm is enabled (by specifying a volume group)
469 if self.cluster.volume_group_name:
470 template_set.add(constants.DT_DRBD8)
471 template_set.add(constants.DT_PLAIN)
472 # FIXME: Adapt this when dis/enabling at configure time is removed.
473 # Enable 'file' and 'sharedfile', if they are enabled, even though they
474 # might currently not be used.
475 if constants.ENABLE_FILE_STORAGE:
476 template_set.add(constants.DT_FILE)
477 if constants.ENABLE_SHARED_FILE_STORAGE:
478 template_set.add(constants.DT_SHARED_FILE)
479 # Set enabled_disk_templates to the inferred disk templates. Order them
480 # according to a preference list that is based on Ganeti's history of
481 # supported disk templates.
482 self.cluster.enabled_disk_templates = []
483 for preferred_template in constants.DISK_TEMPLATE_PREFERENCE:
484 if preferred_template in template_set:
485 self.cluster.enabled_disk_templates.append(preferred_template)
486 template_set.remove(preferred_template)
487 self.cluster.enabled_disk_templates.extend(list(template_set))
488
489
490 class NIC(ConfigObject):
491 """Config object representing a network card."""
492 __slots__ = ["name", "mac", "ip", "network", "nicparams", "netinfo"] + _UUID
493
494 @classmethod
495 def CheckParameterSyntax(cls, nicparams):
496 """Check the given parameters for validity.
497
498 @type nicparams: dict
499 @param nicparams: dictionary with parameter names/value
500 @raise errors.ConfigurationError: when a parameter is not valid
501
502 """
503 mode = nicparams[constants.NIC_MODE]
504 if (mode not in constants.NIC_VALID_MODES and
505 mode != constants.VALUE_AUTO):
506 raise errors.ConfigurationError("Invalid NIC mode '%s'" % mode)
507
508 if (mode == constants.NIC_MODE_BRIDGED and
509 not nicparams[constants.NIC_LINK]):
510 raise errors.ConfigurationError("Missing bridged NIC link")
511
512
513 class Disk(ConfigObject):
514 """Config object representing a block device."""
515 __slots__ = ["name", "dev_type", "logical_id", "physical_id",
516 "children", "iv_name", "size", "mode", "params"] + _UUID
517
518 def CreateOnSecondary(self):
519 """Test if this device needs to be created on a secondary node."""
520 return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)
521
522 def AssembleOnSecondary(self):
523 """Test if this device needs to be assembled on a secondary node."""
524 return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)
525
526 def OpenOnSecondary(self):
527 """Test if this device needs to be opened on a secondary node."""
528 return self.dev_type in (constants.LD_LV,)
529
530 def StaticDevPath(self):
531 """Return the device path if this device type has a static one.
532
533 Some devices (LVM for example) live always at the same /dev/ path,
534 irrespective of their status. For such devices, we return this
535 path, for others we return None.
536
537 @warning: The path returned is not a normalized pathname; callers
538 should check that it is a valid path.
539
540 """
541 if self.dev_type == constants.LD_LV:
542 return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
543 elif self.dev_type == constants.LD_BLOCKDEV:
544 return self.logical_id[1]
545 elif self.dev_type == constants.LD_RBD:
546 return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
547 return None
548
549 def ChildrenNeeded(self):
550 """Compute the needed number of children for activation.
551
552 This method will return either -1 (all children) or a non-negative
553 number denoting the minimum number of children needed for
554 activation (only mirrored devices will usually return >=0).
555
556 Currently, only DRBD8 supports diskless activation (therefore we
557 return 0); for all others we keep the previous semantics and return
558 -1.
559
560 """
561 if self.dev_type == constants.LD_DRBD8:
562 return 0
563 return -1
564
565 def IsBasedOnDiskType(self, dev_type):
566 """Check if the disk or its children are based on the given type.
567
568 @type dev_type: L{constants.LDS_BLOCK}
569 @param dev_type: the type to look for
570 @rtype: boolean
571 @return: boolean indicating if a device of the given type was found or not
572
573 """
574 if self.children:
575 for child in self.children:
576 if child.IsBasedOnDiskType(dev_type):
577 return True
578 return self.dev_type == dev_type
579
580 def GetNodes(self, node):
581 """This function returns the nodes this device lives on.
582
583 Given the node on which the parent of the device lives (or, in
584 case of a top-level device, the primary node of the device's
585 instance), this function will return a list of nodes on which this
586 device needs to (or can) be assembled.
587
588 """
589 if self.dev_type in [constants.LD_LV, constants.LD_FILE,
590 constants.LD_BLOCKDEV, constants.LD_RBD,
591 constants.LD_EXT]:
592 result = [node]
593 elif self.dev_type in constants.LDS_DRBD:
594 result = [self.logical_id[0], self.logical_id[1]]
595 if node not in result:
596 raise errors.ConfigurationError("DRBD device passed unknown node")
597 else:
598 raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
599 return result
600
601 def ComputeNodeTree(self, parent_node):
602 """Compute the node/disk tree for this disk and its children.
603
604 This method, given the node on which the parent disk lives, will
605 return the list of all (node, disk) pairs which describe the disk
606 tree in the most compact way. For example, a drbd/lvm stack
607 will be returned as (primary_node, drbd) and (secondary_node, drbd)
608 which represents all the top-level devices on the nodes.
609
610 """
611 my_nodes = self.GetNodes(parent_node)
612 result = [(node, self) for node in my_nodes]
613 if not self.children:
614 # leaf device
615 return result
616 for node in my_nodes:
617 for child in self.children:
618 child_result = child.ComputeNodeTree(node)
619 if len(child_result) == 1:
620 # child (and all its descendants) is simple, doesn't split
621 # over multiple hosts, so we don't need to describe it, our
622 # own entry for this node describes it completely
623 continue
624 else:
625 # check if child nodes differ from my nodes; note that
626 # subdisk can differ from the child itself, and be instead
627 # one of its descendants
628 for subnode, subdisk in child_result:
629 if subnode not in my_nodes:
630 result.append((subnode, subdisk))
631 # otherwise child is under our own node, so we ignore this
632 # entry (but probably the other results in the list will
633 # be different)
634 return result
635
636 def ComputeGrowth(self, amount):
637 """Compute the per-VG growth requirements.
638
639 This only works for VG-based disks.
640
641 @type amount: integer
642 @param amount: the desired increase in (user-visible) disk space
643 @rtype: dict
644 @return: a dictionary of volume-groups and the required size
645
646 """
647 if self.dev_type == constants.LD_LV:
648 return {self.logical_id[0]: amount}
649 elif self.dev_type == constants.LD_DRBD8:
650 if self.children:
651 return self.children[0].ComputeGrowth(amount)
652 else:
653 return {}
654 else:
655 # Other disk types do not require VG space
656 return {}
657
658 def RecordGrow(self, amount):
659 """Update the size of this disk after growth.
660
661 This method recurses over the disk's children and updates their
662 size correspondingly. The method needs to be kept in sync with the
663 actual algorithms from bdev.
664
665 """
666 if self.dev_type in (constants.LD_LV, constants.LD_FILE,
667 constants.LD_RBD, constants.LD_EXT):
668 self.size += amount
669 elif self.dev_type == constants.LD_DRBD8:
670 if self.children:
671 self.children[0].RecordGrow(amount)
672 self.size += amount
673 else:
674 raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
675 " disk type %s" % self.dev_type)
676
677 def Update(self, size=None, mode=None):
678 """Apply changes to size and mode.
679
680 """
681 if self.dev_type == constants.LD_DRBD8:
682 if self.children:
683 self.children[0].Update(size=size, mode=mode)
684 else:
685 assert not self.children
686
687 if size is not None:
688 self.size = size
689 if mode is not None:
690 self.mode = mode
691
692 def UnsetSize(self):
693 """Sets recursively the size to zero for the disk and its children.
694
695 """
696 if self.children:
697 for child in self.children:
698 child.UnsetSize()
699 self.size = 0
700
701 def SetPhysicalID(self, target_node, nodes_ip):
702 """Convert the logical ID to the physical ID.
703
704 This is used only for drbd, which needs ip/port configuration.
705
706 The routine descends and updates its children also, because
707 this helps when only the top device is passed to the remote
708 node.
709
710 Arguments:
711 - target_node: the node we wish to configure for
712 - nodes_ip: a mapping of node name to ip
713
714 The target_node must exist in nodes_ip, and must be one of the
715 nodes in the logical ID for each of the DRBD devices encountered
716 in the disk tree.
717
718 """
719 if self.children:
720 for child in self.children:
721 child.SetPhysicalID(target_node, nodes_ip)
722
723 if self.logical_id is None and self.physical_id is not None:
724 return
725 if self.dev_type in constants.LDS_DRBD:
726 pnode, snode, port, pminor, sminor, secret = self.logical_id
727 if target_node not in (pnode, snode):
728 raise errors.ConfigurationError("DRBD device not knowing node %s" %
729 target_node)
730 pnode_ip = nodes_ip.get(pnode, None)
731 snode_ip = nodes_ip.get(snode, None)
732 if pnode_ip is None or snode_ip is None:
733 raise errors.ConfigurationError("Can't find primary or secondary node"
734 " for %s" % str(self))
735 p_data = (pnode_ip, port)
736 s_data = (snode_ip, port)
737 if pnode == target_node:
738 self.physical_id = p_data + s_data + (pminor, secret)
739 else: # it must be secondary, we tested above
740 self.physical_id = s_data + p_data + (sminor, secret)
741 else:
742 self.physical_id = self.logical_id
743 return
744
745 def ToDict(self):
746 """Disk-specific conversion to standard python types.
747
748 This replaces the children lists of objects with lists of
749 standard python types.
750
751 """
752 bo = super(Disk, self).ToDict()
753
754 for attr in ("children",):
755 alist = bo.get(attr, None)
756 if alist:
757 bo[attr] = outils.ContainerToDicts(alist)
758 return bo
759
760 @classmethod
761 def FromDict(cls, val):
762 """Custom function for Disks
763
764 """
765 obj = super(Disk, cls).FromDict(val)
766 if obj.children:
767 obj.children = outils.ContainerFromDicts(obj.children, list, Disk)
768 if obj.logical_id and isinstance(obj.logical_id, list):
769 obj.logical_id = tuple(obj.logical_id)
770 if obj.physical_id and isinstance(obj.physical_id, list):
771 obj.physical_id = tuple(obj.physical_id)
772 if obj.dev_type in constants.LDS_DRBD:
773 # we need a tuple of length six here
774 if len(obj.logical_id) < 6:
775 obj.logical_id += (None,) * (6 - len(obj.logical_id))
776 return obj
777
778 def __str__(self):
779 """Custom str() formatter for disks.
780
781 """
782 if self.dev_type == constants.LD_LV:
783 val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
784 elif self.dev_type in constants.LDS_DRBD:
785 node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
786 val = "<DRBD8("
787 if self.physical_id is None:
788 phy = "unconfigured"
789 else:
790 phy = ("configured as %s:%s %s:%s" %
791 (self.physical_id[0], self.physical_id[1],
792 self.physical_id[2], self.physical_id[3]))
793
794 val += ("hosts=%s/%d-%s/%d, port=%s, %s, " %
795 (node_a, minor_a, node_b, minor_b, port, phy))
796 if self.children and self.children.count(None) == 0:
797 val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
798 else:
799 val += "no local storage"
800 else:
801 val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
802 (self.dev_type, self.logical_id, self.physical_id, self.children))
803 if self.iv_name is None:
804 val += ", not visible"
805 else:
806 val += ", visible as /dev/%s" % self.iv_name
807 if isinstance(self.size, int):
808 val += ", size=%dm)>" % self.size
809 else:
810 val += ", size='%s')>" % (self.size,)
811 return val
812
813 def Verify(self):
814 """Checks that this disk is correctly configured.
815
816 """
817 all_errors = []
818 if self.mode not in constants.DISK_ACCESS_SET:
819 all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
820 return all_errors
821
822 def UpgradeConfig(self):
823 """Fill defaults for missing configuration values.
824
825 """
826 if self.children:
827 for child in self.children:
828 child.UpgradeConfig()
829
830 # FIXME: Make this configurable in Ganeti 2.7
831 self.params = {}
832 # add here config upgrade for this disk
833
834 @staticmethod
835 def ComputeLDParams(disk_template, disk_params):
836 """Computes Logical Disk parameters from Disk Template parameters.
837
838 @type disk_template: string
839 @param disk_template: disk template, one of L{constants.DISK_TEMPLATES}
840 @type disk_params: dict
841 @param disk_params: disk template parameters;
842 dict(template_name -> parameters)
843 @rtype: list(dict)
844 @return: a list of dicts, one for each node of the disk hierarchy. Each dict
845 contains the LD parameters of the node. The tree is flattened in-order.
846
847 """
848 if disk_template not in constants.DISK_TEMPLATES:
849 raise errors.ProgrammerError("Unknown disk template %s" % disk_template)
850
851 assert disk_template in disk_params
852
853 result = list()
854 dt_params = disk_params[disk_template]
855 if disk_template == constants.DT_DRBD8:
856 result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_DRBD8], {
857 constants.LDP_RESYNC_RATE: dt_params[constants.DRBD_RESYNC_RATE],
858 constants.LDP_BARRIERS: dt_params[constants.DRBD_DISK_BARRIERS],
859 constants.LDP_NO_META_FLUSH: dt_params[constants.DRBD_META_BARRIERS],
860 constants.LDP_DEFAULT_METAVG: dt_params[constants.DRBD_DEFAULT_METAVG],
861 constants.LDP_DISK_CUSTOM: dt_params[constants.DRBD_DISK_CUSTOM],
862 constants.LDP_NET_CUSTOM: dt_params[constants.DRBD_NET_CUSTOM],
863 constants.LDP_DYNAMIC_RESYNC: dt_params[constants.DRBD_DYNAMIC_RESYNC],
864 constants.LDP_PLAN_AHEAD: dt_params[constants.DRBD_PLAN_AHEAD],
865 constants.LDP_FILL_TARGET: dt_params[constants.DRBD_FILL_TARGET],
866 constants.LDP_DELAY_TARGET: dt_params[constants.DRBD_DELAY_TARGET],
867 constants.LDP_MAX_RATE: dt_params[constants.DRBD_MAX_RATE],
868 constants.LDP_MIN_RATE: dt_params[constants.DRBD_MIN_RATE],
869 }))
870
871 # data LV
872 result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], {
873 constants.LDP_STRIPES: dt_params[constants.DRBD_DATA_STRIPES],
874 }))
875
876 # metadata LV
877 result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], {
878 constants.LDP_STRIPES: dt_params[constants.DRBD_META_STRIPES],
879 }))
880
881 elif disk_template in (constants.DT_FILE, constants.DT_SHARED_FILE):
882 result.append(constants.DISK_LD_DEFAULTS[constants.LD_FILE])
883
884 elif disk_template == constants.DT_PLAIN:
885 result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], {
886 constants.LDP_STRIPES: dt_params[constants.LV_STRIPES],
887 }))
888
889 elif disk_template == constants.DT_BLOCK:
890 result.append(constants.DISK_LD_DEFAULTS[constants.LD_BLOCKDEV])
891
892 elif disk_template == constants.DT_RBD:
893 result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_RBD], {
894 constants.LDP_POOL: dt_params[constants.RBD_POOL],
895 }))
896
897 elif disk_template == constants.DT_EXT:
898 result.append(constants.DISK_LD_DEFAULTS[constants.LD_EXT])
899
900 return result
901
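# Illustrative sketch, not part of the original module: a DRBD8 disk backed
# by two local LVs and the node/disk tree computed for it. The node names,
# volume group, port/minor numbers and shared secret are hypothetical.
def _ExampleDiskTree():
  data_lv = Disk(dev_type=constants.LD_LV, size=1024,
                 logical_id=("xenvg", "data-lv"))
  meta_lv = Disk(dev_type=constants.LD_LV, size=128,
                 logical_id=("xenvg", "meta-lv"))
  drbd = Disk(dev_type=constants.LD_DRBD8, size=1024,
              children=[data_lv, meta_lv],
              # (node_a, node_b, port, minor_a, minor_b, secret)
              logical_id=("node1", "node2", 11000, 0, 0, "secret"))
  # the DRBD device lives on both nodes named in its logical_id ...
  assert set(drbd.GetNodes("node1")) == set(["node1", "node2"])
  # ... and the compact node/disk tree lists only the top-level device,
  # since the LV children live on the same nodes as their parent
  tree = drbd.ComputeNodeTree("node1")
  assert sorted(node for (node, _) in tree) == ["node1", "node2"]
  return tree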
902
903 class InstancePolicy(ConfigObject):
904 """Config object representing instance policy limits dictionary.
905
906 Note that this object is not actually used in the config, it's just
907 used as a placeholder for a few functions.
908
909 """
910 @classmethod
911 def CheckParameterSyntax(cls, ipolicy, check_std):
912 """ Check the instance policy for validity.
913
914 @type ipolicy: dict
915 @param ipolicy: dictionary with min/max/std specs and policies
916 @type check_std: bool
917 @param check_std: Whether to check std value or just assume compliance
918 @raise errors.ConfigurationError: when the policy is not legal
919
920 """
921 InstancePolicy.CheckISpecSyntax(ipolicy, check_std)
922 if constants.IPOLICY_DTS in ipolicy:
923 InstancePolicy.CheckDiskTemplates(ipolicy[constants.IPOLICY_DTS])
924 for key in constants.IPOLICY_PARAMETERS:
925 if key in ipolicy:
926 InstancePolicy.CheckParameter(key, ipolicy[key])
927 wrong_keys = frozenset(ipolicy.keys()) - constants.IPOLICY_ALL_KEYS
928 if wrong_keys:
929 raise errors.ConfigurationError("Invalid keys in ipolicy: %s" %
930 utils.CommaJoin(wrong_keys))
931
932 @classmethod
933 def _CheckIncompleteSpec(cls, spec, keyname):
934 missing_params = constants.ISPECS_PARAMETERS - frozenset(spec.keys())
935 if missing_params:
936 msg = ("Missing instance specs parameters for %s: %s" %
937 (keyname, utils.CommaJoin(missing_params)))
938 raise errors.ConfigurationError(msg)
939
940 @classmethod
941 def CheckISpecSyntax(cls, ipolicy, check_std):
942 """Check the instance policy specs for validity.
943
944 @type ipolicy: dict
945 @param ipolicy: dictionary with min/max/std specs
946 @type check_std: bool
947 @param check_std: Whether to check std value or just assume compliance
948 @raise errors.ConfigurationError: when specs are not valid
949
950 """
951 if constants.ISPECS_MINMAX not in ipolicy:
952 # Nothing to check
953 return
954
955 if check_std and constants.ISPECS_STD not in ipolicy:
956 msg = "Missing key in ipolicy: %s" % constants.ISPECS_STD
957 raise errors.ConfigurationError(msg)
958 stdspec = ipolicy.get(constants.ISPECS_STD)
959 if check_std:
960 InstancePolicy._CheckIncompleteSpec(stdspec, constants.ISPECS_STD)
961
962 if not ipolicy[constants.ISPECS_MINMAX]:
963 raise errors.ConfigurationError("Empty minmax specifications")
964 std_is_good = False
965 for minmaxspecs in ipolicy[constants.ISPECS_MINMAX]:
966 missing = constants.ISPECS_MINMAX_KEYS - frozenset(minmaxspecs.keys())
967 if missing:
968 msg = "Missing instance specification: %s" % utils.CommaJoin(missing)
969 raise errors.ConfigurationError(msg)
970 for (key, spec) in minmaxspecs.items():
971 InstancePolicy._CheckIncompleteSpec(spec, key)
972
973 spec_std_ok = True
974 for param in constants.ISPECS_PARAMETERS:
975 par_std_ok = InstancePolicy._CheckISpecParamSyntax(minmaxspecs, stdspec,
976 param, check_std)
977 spec_std_ok = spec_std_ok and par_std_ok
978 std_is_good = std_is_good or spec_std_ok
979 if not std_is_good:
980 raise errors.ConfigurationError("Invalid std specifications")
981
982 @classmethod
983 def _CheckISpecParamSyntax(cls, minmaxspecs, stdspec, name, check_std):
984 """Check the instance policy specs for validity on a given key.
985
986 We check if the instance specs make sense for a given key, that is
987 if minmaxspecs[min][name] <= stdspec[name] <= minmaxspecs[max][name].
988
989 @type minmaxspecs: dict
990 @param minmaxspecs: dictionary with min and max instance spec
991 @type stdspec: dict
992 @param stdspec: dictionary with standard instance spec
993 @type name: string
994 @param name: what are the limits for
995 @type check_std: bool
996 @param check_std: Whether to check std value or just assume compliance
997 @rtype: bool
998 @return: C{True} when specs are valid, C{False} when standard spec for the
999 given name is not valid
1000 @raise errors.ConfigurationError: when min/max specs for the given name
1001 are not valid
1002
1003 """
1004 minspec = minmaxspecs[constants.ISPECS_MIN]
1005 maxspec = minmaxspecs[constants.ISPECS_MAX]
1006 min_v = minspec[name]
1007 max_v = maxspec[name]
1008
1009 if min_v > max_v:
1010 err = ("Invalid specification of min/max values for %s: %s/%s" %
1011 (name, min_v, max_v))
1012 raise errors.ConfigurationError(err)
1013 elif check_std:
1014 std_v = stdspec.get(name, min_v)
1015 return std_v >= min_v and std_v <= max_v
1016 else:
1017 return True
1018
1019 @classmethod
1020 def CheckDiskTemplates(cls, disk_templates):
1021 """Checks the disk templates for validity.
1022
1023 """
1024 if not disk_templates:
1025 raise errors.ConfigurationError("Instance policy must contain" +
1026 " at least one disk template")
1027 wrong = frozenset(disk_templates).difference(constants.DISK_TEMPLATES)
1028 if wrong:
1029 raise errors.ConfigurationError("Invalid disk template(s) %s" %
1030 utils.CommaJoin(wrong))
1031
1032 @classmethod
1033 def CheckParameter(cls, key, value):
1034 """Checks a parameter.
1035
1036 Currently we expect all parameters to be float values.
1037
1038 """
1039 try:
1040 float(value)
1041 except (TypeError, ValueError), err:
1042 raise errors.ConfigurationError("Invalid value for key '%s': '%s',"
1043 " error: %s" % (key, value, err))
1044
1045
1046 class Instance(TaggableObject):
1047 """Config object representing an instance."""
1048 __slots__ = [
1049 "name",
1050 "primary_node",
1051 "os",
1052 "hypervisor",
1053 "hvparams",
1054 "beparams",
1055 "osparams",
1056 "admin_state",
1057 "nics",
1058 "disks",
1059 "disk_template",
1060 "disks_active",
1061 "network_port",
1062 "serial_no",
1063 ] + _TIMESTAMPS + _UUID
1064
1065 def _ComputeSecondaryNodes(self):
1066 """Compute the list of secondary nodes.
1067
1068 This is a simple wrapper over _ComputeAllNodes.
1069
1070 """
1071 all_nodes = set(self._ComputeAllNodes())
1072 all_nodes.discard(self.primary_node)
1073 return tuple(all_nodes)
1074
1075 secondary_nodes = property(_ComputeSecondaryNodes, None, None,
1076 "List of names of secondary nodes")
1077
1078 def _ComputeAllNodes(self):
1079 """Compute the list of all nodes.
1080
1081 Since the data is already there (in the drbd disks), keeping it as
1082 a separate normal attribute is redundant and if not properly
1083 synchronised can cause problems. Thus it's better to compute it
1084 dynamically.
1085
1086 """
1087 def _Helper(nodes, device):
1088 """Recursively computes nodes given a top device."""
1089 if device.dev_type in constants.LDS_DRBD:
1090 nodea, nodeb = device.logical_id[:2]
1091 nodes.add(nodea)
1092 nodes.add(nodeb)
1093 if device.children:
1094 for child in device.children:
1095 _Helper(nodes, child)
1096
1097 all_nodes = set()
1098 all_nodes.add(self.primary_node)
1099 for device in self.disks:
1100 _Helper(all_nodes, device)
1101 return tuple(all_nodes)
1102
1103 all_nodes = property(_ComputeAllNodes, None, None,
1104 "List of names of all the nodes of the instance")
1105
1106 def MapLVsByNode(self, lvmap=None, devs=None, node=None):
1107 """Provide a mapping of nodes to LVs this instance owns.
1108
1109 This function figures out what logical volumes should belong on
1110 which nodes, recursing through a device tree.
1111
1112 @param lvmap: optional dictionary to receive the
1113 'node' : ['lv', ...] data.
1114
1115 @return: None if lvmap arg is given, otherwise, a dictionary of
1116 the form { 'nodename' : ['volume1', 'volume2', ...], ... };
1117 volumeN is of the form "vg_name/lv_name", compatible with
1118 GetVolumeList()
1119
1120 """
1121 if node is None:
1122 node = self.primary_node
1123
1124 if lvmap is None:
1125 lvmap = {
1126 node: [],
1127 }
1128 ret = lvmap
1129 else:
1130 if not node in lvmap:
1131 lvmap[node] = []
1132 ret = None
1133
1134 if not devs:
1135 devs = self.disks
1136
1137 for dev in devs:
1138 if dev.dev_type == constants.LD_LV:
1139 lvmap[node].append(dev.logical_id[0] + "/" + dev.logical_id[1])
1140
1141 elif dev.dev_type in constants.LDS_DRBD:
1142 if dev.children:
1143 self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
1144 self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])
1145
1146 elif dev.children:
1147 self.MapLVsByNode(lvmap, dev.children, node)
1148
1149 return ret
1150
1151 def FindDisk(self, idx):
1152 """Find a disk given having a specified index.
1153
1154 This is just a wrapper that does validation of the index.
1155
1156 @type idx: int
1157 @param idx: the disk index
1158 @rtype: L{Disk}
1159 @return: the corresponding disk
1160 @raise errors.OpPrereqError: when the given index is not valid
1161
1162 """
1163 try:
1164 idx = int(idx)
1165 return self.disks[idx]
1166 except (TypeError, ValueError), err:
1167 raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err),
1168 errors.ECODE_INVAL)
1169 except IndexError:
1170 raise errors.OpPrereqError("Invalid disk index: %d (instance has disks"
1171 " 0 to %d)" % (idx, len(self.disks) - 1),
1172 errors.ECODE_INVAL)
1173
1174 def ToDict(self):
1175 """Instance-specific conversion to standard python types.
1176
1177 This replaces the children lists of objects with lists of standard
1178 python types.
1179
1180 """
1181 bo = super(Instance, self).ToDict()
1182
1183 for attr in "nics", "disks":
1184 alist = bo.get(attr, None)
1185 if alist:
1186 nlist = outils.ContainerToDicts(alist)
1187 else:
1188 nlist = []
1189 bo[attr] = nlist
1190 return bo
1191
1192 @classmethod
1193 def FromDict(cls, val):
1194 """Custom function for instances.
1195
1196 """
1197 if "admin_state" not in val:
1198 if val.get("admin_up", False):
1199 val["admin_state"] = constants.ADMINST_UP
1200 else:
1201 val["admin_state"] = constants.ADMINST_DOWN
1202 if "admin_up" in val:
1203 del val["admin_up"]
1204 obj = super(Instance, cls).FromDict(val)
1205 obj.nics = outils.ContainerFromDicts(obj.nics, list, NIC)
1206 obj.disks = outils.ContainerFromDicts(obj.disks, list, Disk)
1207 return obj
1208
1209 def UpgradeConfig(self):
1210 """Fill defaults for missing configuration values.
1211
1212 """
1213 for nic in self.nics:
1214 nic.UpgradeConfig()
1215 for disk in self.disks:
1216 disk.UpgradeConfig()
1217 if self.hvparams:
1218 for key in constants.HVC_GLOBALS:
1219 try:
1220 del self.hvparams[key]
1221 except KeyError:
1222 pass
1223 if self.osparams is None:
1224 self.osparams = {}
1225 UpgradeBeParams(self.beparams)
1226 if self.disks_active is None:
1227 self.disks_active = self.admin_state == constants.ADMINST_UP
1228
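# Illustrative sketch, not part of the original module: the node and LV
# bookkeeping derived from an instance's disk tree. Node names, the volume
# group and the DRBD identifiers are hypothetical.
def _ExampleInstanceNodes():
  data_lv = Disk(dev_type=constants.LD_LV, size=1024,
                 logical_id=("xenvg", "data-lv"))
  meta_lv = Disk(dev_type=constants.LD_LV, size=128,
                 logical_id=("xenvg", "meta-lv"))
  drbd = Disk(dev_type=constants.LD_DRBD8, size=1024,
              children=[data_lv, meta_lv],
              logical_id=("node1", "node2", 11000, 0, 0, "secret"))
  inst = Instance(name="inst1.example.com", primary_node="node1",
                  disks=[drbd], nics=[])
  # secondary nodes are derived from the DRBD logical ids
  assert inst.secondary_nodes == ("node2",)
  # both nodes carry the backing "vg/lv" volumes
  lvmap = inst.MapLVsByNode()
  assert sorted(lvmap["node1"]) == ["xenvg/data-lv", "xenvg/meta-lv"]
  assert sorted(lvmap["node1"]) == sorted(lvmap["node2"])
  return lvmap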
1229
1230 class OS(ConfigObject):
1231 """Config object representing an operating system.
1232
1233 @type supported_parameters: list
1234 @ivar supported_parameters: a list of tuples, name and description,
1235 containing the supported parameters by this OS
1236
1237 @type VARIANT_DELIM: string
1238 @cvar VARIANT_DELIM: the variant delimiter
1239
1240 """
1241 __slots__ = [
1242 "name",
1243 "path",
1244 "api_versions",
1245 "create_script",
1246 "export_script",
1247 "import_script",
1248 "rename_script",
1249 "verify_script",
1250 "supported_variants",
1251 "supported_parameters",
1252 ]
1253
1254 VARIANT_DELIM = "+"
1255
1256 @classmethod
1257 def SplitNameVariant(cls, name):
1258 """Splits the name into the proper name and variant.
1259
1260 @param name: the OS (unprocessed) name
1261 @rtype: list
1262 @return: a list of two elements, [name, variant]; if the original
1263 name didn't contain a variant, the variant is an empty string
1264
1265 """
1266 nv = name.split(cls.VARIANT_DELIM, 1)
1267 if len(nv) == 1:
1268 nv.append("")
1269 return nv
1270
1271 @classmethod
1272 def GetName(cls, name):
1273 """Returns the proper name of the os (without the variant).
1274
1275 @param name: the OS (unprocessed) name
1276
1277 """
1278 return cls.SplitNameVariant(name)[0]
1279
1280 @classmethod
1281 def GetVariant(cls, name):
1282 """Returns the variant the os (without the base name).
1283
1284 @param name: the OS (unprocessed) name
1285
1286 """
1287 return cls.SplitNameVariant(name)[1]
1288
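# Illustrative sketch, not part of the original module: splitting an OS name
# into base name and variant with the "+" delimiter defined by
# OS.VARIANT_DELIM. The OS name is hypothetical.
def _ExampleOsVariant():
  assert OS.SplitNameVariant("debootstrap+secure") == ["debootstrap", "secure"]
  assert OS.GetName("debootstrap+secure") == "debootstrap"
  assert OS.GetVariant("debootstrap+secure") == "secure"
  # a plain name has an empty variant
  assert OS.SplitNameVariant("debootstrap") == ["debootstrap", ""]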
1289
1290 class ExtStorage(ConfigObject):
1291 """Config object representing an External Storage Provider.
1292
1293 """
1294 __slots__ = [
1295 "name",
1296 "path",
1297 "create_script",
1298 "remove_script",
1299 "grow_script",
1300 "attach_script",
1301 "detach_script",
1302 "setinfo_script",
1303 "verify_script",
1304 "supported_parameters",
1305 ]
1306
1307
1308 class NodeHvState(ConfigObject):
1309 """Hypvervisor state on a node.
1310
1311 @ivar mem_total: Total amount of memory
1312 @ivar mem_node: Memory used by, or reserved for, the node itself (not always
1313 available)
1314 @ivar mem_hv: Memory used by hypervisor or lost due to instance allocation
1315 rounding
1316 @ivar mem_inst: Memory used by instances living on node
1317 @ivar cpu_total: Total node CPU core count
1318 @ivar cpu_node: Number of CPU cores reserved for the node itself
1319
1320 """
1321 __slots__ = [
1322 "mem_total",
1323 "mem_node",
1324 "mem_hv",
1325 "mem_inst",
1326 "cpu_total",
1327 "cpu_node",
1328 ] + _TIMESTAMPS
1329
1330
1331 class NodeDiskState(ConfigObject):
1332 """Disk state on a node.
1333
1334 """
1335 __slots__ = [
1336 "total",
1337 "reserved",
1338 "overhead",
1339 ] + _TIMESTAMPS
1340
1341
1342 class Node(TaggableObject):
1343 """Config object representing a node.
1344
1345 @ivar hv_state: Hypervisor state (e.g. number of CPUs)
1346 @ivar hv_state_static: Hypervisor state overridden by user
1347 @ivar disk_state: Disk state (e.g. free space)
1348 @ivar disk_state_static: Disk state overridden by user
1349
1350 """
1351 __slots__ = [
1352 "name",
1353 "primary_ip",
1354 "secondary_ip",
1355 "serial_no",
1356 "master_candidate",
1357 "offline",
1358 "drained",
1359 "group",
1360 "master_capable",
1361 "vm_capable",
1362 "ndparams",
1363 "powered",
1364 "hv_state",
1365 "hv_state_static",
1366 "disk_state",
1367 "disk_state_static",
1368 ] + _TIMESTAMPS + _UUID
1369
1370 def UpgradeConfig(self):
1371 """Fill defaults for missing configuration values.
1372
1373 """
1374 # pylint: disable=E0203
1375 # because these are "defined" via slots, not manually
1376 if self.master_capable is None:
1377 self.master_capable = True
1378
1379 if self.vm_capable is None:
1380 self.vm_capable = True
1381
1382 if self.ndparams is None:
1383 self.ndparams = {}
1384 # And remove any global parameter
1385 for key in constants.NDC_GLOBALS:
1386 if key in self.ndparams:
1387 logging.warning("Ignoring %s node parameter for node %s",
1388 key, self.name)
1389 del self.ndparams[key]
1390
1391 if self.powered is None:
1392 self.powered = True
1393
1394 def ToDict(self):
1395 """Custom function for serializing.
1396
1397 """
1398 data = super(Node, self).ToDict()
1399
1400 hv_state = data.get("hv_state", None)
1401 if hv_state is not None:
1402 data["hv_state"] = outils.ContainerToDicts(hv_state)
1403
1404 disk_state = data.get("disk_state", None)
1405 if disk_state is not None:
1406 data["disk_state"] = \
1407 dict((key, outils.ContainerToDicts(value))
1408 for (key, value) in disk_state.items())
1409
1410 return data
1411
1412 @classmethod
1413 def FromDict(cls, val):
1414 """Custom function for deserializing.
1415
1416 """
1417 obj = super(Node, cls).FromDict(val)
1418
1419 if obj.hv_state is not None:
1420 obj.hv_state = \
1421 outils.ContainerFromDicts(obj.hv_state, dict, NodeHvState)
1422
1423 if obj.disk_state is not None:
1424 obj.disk_state = \
1425 dict((key, outils.ContainerFromDicts(value, dict, NodeDiskState))
1426 for (key, value) in obj.disk_state.items())
1427
1428 return obj
1429
1430
1431 class NodeGroup(TaggableObject):
1432 """Config object representing a node group."""
1433 __slots__ = [
1434 "name",
1435 "members",
1436 "ndparams",
1437 "diskparams",
1438 "ipolicy",
1439 "serial_no",
1440 "hv_state_static",
1441 "disk_state_static",
1442 "alloc_policy",
1443 "networks",
1444 ] + _TIMESTAMPS + _UUID
1445
1446 def ToDict(self):
1447 """Custom function for nodegroup.
1448
1449 This discards the members object, which gets recalculated and is only kept
1450 in memory.
1451
1452 """
1453 mydict = super(NodeGroup, self).ToDict()
1454 del mydict["members"]
1455 return mydict
1456
1457 @classmethod
1458 def FromDict(cls, val):
1459 """Custom function for nodegroup.
1460
1461 The members slot is initialized to an empty list, upon deserialization.
1462
1463 """
1464 obj = super(NodeGroup, cls).FromDict(val)
1465 obj.members = []
1466 return obj
1467
1468 def UpgradeConfig(self):
1469 """Fill defaults for missing configuration values.
1470
1471 """
1472 if self.ndparams is None:
1473 self.ndparams = {}
1474
1475 if self.serial_no is None:
1476 self.serial_no = 1
1477
1478 if self.alloc_policy is None:
1479 self.alloc_policy = constants.ALLOC_POLICY_PREFERRED
1480
1481 # We only update mtime, and not ctime, since we would not be able
1482 # to provide a correct value for creation time.
1483 if self.mtime is None:
1484 self.mtime = time.time()
1485
1486 if self.diskparams is None:
1487 self.diskparams = {}
1488 if self.ipolicy is None:
1489 self.ipolicy = MakeEmptyIPolicy()
1490
1491 if self.networks is None:
1492 self.networks = {}
1493
1494 def FillND(self, node):
1495 """Return filled out ndparams for L{objects.Node}
1496
1497 @type node: L{objects.Node}
1498 @param node: A Node object to fill
1499 @return: a copy of the node's ndparams with defaults filled
1500
1501 """
1502 return self.SimpleFillND(node.ndparams)
1503
1504 def SimpleFillND(self, ndparams):
1505 """Fill a given ndparams dict with defaults.
1506
1507 @type ndparams: dict
1508 @param ndparams: the dict to fill
1509 @rtype: dict
1510 @return: a copy of the passed in ndparams with missing keys filled
1511 from the node group defaults
1512
1513 """
1514 return FillDict(self.ndparams, ndparams)
1515
1516
1517 class Cluster(TaggableObject):
1518 """Config object representing the cluster."""
1519 __slots__ = [
1520 "serial_no",
1521 "rsahostkeypub",
1522 "dsahostkeypub",
1523 "highest_used_port",
1524 "tcpudp_port_pool",
1525 "mac_prefix",
1526 "volume_group_name",
1527 "reserved_lvs",
1528 "drbd_usermode_helper",
1529 "default_bridge",
1530 "default_hypervisor",
1531 "master_node",
1532 "master_ip",
1533 "master_netdev",
1534 "master_netmask",
1535 "use_external_mip_script",
1536 "cluster_name",
1537 "file_storage_dir",
1538 "shared_file_storage_dir",
1539 "enabled_hypervisors",
1540 "hvparams",
1541 "ipolicy",
1542 "os_hvp",
1543 "beparams",
1544 "osparams",
1545 "nicparams",
1546 "ndparams",
1547 "diskparams",
1548 "candidate_pool_size",
1549 "modify_etc_hosts",
1550 "modify_ssh_setup",
1551 "maintain_node_health",
1552 "uid_pool",
1553 "default_iallocator",
1554 "hidden_os",
1555 "blacklisted_os",
1556 "primary_ip_family",
1557 "prealloc_wipe_disks",
1558 "hv_state_static",
1559 "disk_state_static",
1560 "enabled_disk_templates",
1561 ] + _TIMESTAMPS + _UUID
1562
1563 def UpgradeConfig(self):
1564 """Fill defaults for missing configuration values.
1565
1566 """
1567 # pylint: disable=E0203
1568 # because these are "defined" via slots, not manually
1569 if self.hvparams is None:
1570 self.hvparams = constants.HVC_DEFAULTS
1571 else:
1572 for hypervisor in self.hvparams:
1573 self.hvparams[hypervisor] = FillDict(
1574 constants.HVC_DEFAULTS[hypervisor], self.hvparams[hypervisor])
1575
1576 if self.os_hvp is None:
1577 self.os_hvp = {}
1578
1579 # osparams added before 2.2
1580 if self.osparams is None:
1581 self.osparams = {}
1582
1583 self.ndparams = UpgradeNDParams(self.ndparams)
1584
1585 self.beparams = UpgradeGroupedParams(self.beparams,
1586 constants.BEC_DEFAULTS)
1587 for beparams_group in self.beparams:
1588 UpgradeBeParams(self.beparams[beparams_group])
1589
1590 migrate_default_bridge = not self.nicparams
1591 self.nicparams = UpgradeGroupedParams(self.nicparams,
1592 constants.NICC_DEFAULTS)
1593 if migrate_default_bridge:
1594 self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
1595 self.default_bridge
1596
1597 if self.modify_etc_hosts is None:
1598 self.modify_etc_hosts = True
1599
1600 if self.modify_ssh_setup is None:
1601 self.modify_ssh_setup = True
1602
1603 # default_bridge is no longer used in 2.1. The slot is left there to
1604 # support auto-upgrading. It can be removed once we decide to deprecate
1605 # upgrading straight from 2.0.
1606 if self.default_bridge is not None:
1607 self.default_bridge = None
1608
1609 # default_hypervisor is just the first enabled one in 2.1. This slot and
1610 # code can be removed once upgrading straight from 2.0 is deprecated.
1611 if self.default_hypervisor is not None:
1612 self.enabled_hypervisors = ([self.default_hypervisor] +
1613 [hvname for hvname in self.enabled_hypervisors
1614 if hvname != self.default_hypervisor])
1615 self.default_hypervisor = None
1616
1617 # maintain_node_health added after 2.1.1
1618 if self.maintain_node_health is None:
1619 self.maintain_node_health = False
1620
1621 if self.uid_pool is None:
1622 self.uid_pool = []
1623
1624 if self.default_iallocator is None:
1625 self.default_iallocator = ""
1626
1627 # reserved_lvs added before 2.2
1628 if self.reserved_lvs is None:
1629 self.reserved_lvs = []
1630
1631 # hidden and blacklisted operating systems added before 2.2.1
1632 if self.hidden_os is None:
1633 self.hidden_os = []
1634
1635 if self.blacklisted_os is None:
1636 self.blacklisted_os = []
1637
1638 # primary_ip_family added before 2.3
1639 if self.primary_ip_family is None:
1640 self.primary_ip_family = AF_INET
1641
1642 if self.master_netmask is None:
1643 ipcls = netutils.IPAddress.GetClassFromIpFamily(self.primary_ip_family)
1644 self.master_netmask = ipcls.iplen
1645
1646 if self.prealloc_wipe_disks is None:
1647 self.prealloc_wipe_disks = False
1648
1649 # shared_file_storage_dir added before 2.5
1650 if self.shared_file_storage_dir is None:
1651 self.shared_file_storage_dir = ""
1652
1653 if self.use_external_mip_script is None:
1654 self.use_external_mip_script = False
1655
1656 if self.diskparams:
1657 self.diskparams = UpgradeDiskParams(self.diskparams)
1658 else:
1659 self.diskparams = constants.DISK_DT_DEFAULTS.copy()
1660
1661 # instance policy added before 2.6
1662 if self.ipolicy is None:
1663 self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, {})
1664 else:
1665 # we can either make sure to upgrade the ipolicy always, or only
1666 # do it in some corner cases (e.g. missing keys); note that this
1667 # will break any removal of keys from the ipolicy dict
1668 wrongkeys = frozenset(self.ipolicy.keys()) - constants.IPOLICY_ALL_KEYS
1669 if wrongkeys:
1670 # These keys would be silently removed by FillIPolicy()
1671 msg = ("Cluster instance policy contains spurious keys: %s" %
1672 utils.CommaJoin(wrongkeys))
1673 raise errors.ConfigurationError(msg)
1674 self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, self.ipolicy)
1675
1676 @property
1677 def primary_hypervisor(self):
1678 """The first hypervisor is the primary.
1679
1680 Useful, for example, for L{Node}'s hv/disk state.
1681
1682 """
1683 return self.enabled_hypervisors[0]
1684
1685 def ToDict(self):
1686 """Custom function for cluster.
1687
1688 """
1689 mydict = super(Cluster, self).ToDict()
1690
1691 if self.tcpudp_port_pool is None:
1692 tcpudp_port_pool = []
1693 else:
1694 tcpudp_port_pool = list(self.tcpudp_port_pool)
1695
1696 mydict["tcpudp_port_pool"] = tcpudp_port_pool
1697
1698 return mydict
1699
1700 @classmethod
1701 def FromDict(cls, val):
1702 """Custom function for cluster.
1703
1704 """
1705 obj = super(Cluster, cls).FromDict(val)
1706
1707 if obj.tcpudp_port_pool is None:
1708 obj.tcpudp_port_pool = set()
1709 elif not isinstance(obj.tcpudp_port_pool, set):
1710 obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)
1711
1712 return obj
1713
1714 def SimpleFillDP(self, diskparams):
1715 """Fill a given diskparams dict with cluster defaults.
1716
1717 @param diskparams: The diskparams
1718 @return: the given diskparams with missing keys filled from the
cluster defaults
1719
1720 """
1721 return FillDiskParams(self.diskparams, diskparams)
1722
1723 def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
1724 """Get the default hypervisor parameters for the cluster.
1725
1726 @param hypervisor: the hypervisor name
1727 @param os_name: if specified, we'll also update the defaults for this OS
1728 @param skip_keys: if passed, list of keys not to use
1729 @return: the defaults dict
1730
1731 """
1732 if skip_keys is None:
1733 skip_keys = []
1734
1735 fill_stack = [self.hvparams.get(hypervisor, {})]
1736 if os_name is not None:
1737 os_hvp = self.os_hvp.get(os_name, {}).get(hypervisor, {})
1738 fill_stack.append(os_hvp)
1739
1740 ret_dict = {}
1741 for o_dict in fill_stack:
1742 ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys)
1743
1744 return ret_dict
1745
1746 def SimpleFillHV(self, hv_name, os_name, hvparams, skip_globals=False):
1747 """Fill a given hvparams dict with cluster defaults.
1748
1749 @type hv_name: string
1750 @param hv_name: the hypervisor to use
1751 @type os_name: string
1752 @param os_name: the OS to use for overriding the hypervisor defaults
1753 @type skip_globals: boolean
1754 @param skip_globals: if True, the global hypervisor parameters will
1755 not be filled
1756 @rtype: dict
1757 @return: a copy of the given hvparams with missing keys filled from
1758 the cluster defaults
1759
1760 """
1761 if skip_globals:
1762 skip_keys = constants.HVC_GLOBALS
1763 else:
1764 skip_keys = []
1765
1766 def_dict = self.GetHVDefaults(hv_name, os_name, skip_keys=skip_keys)
1767 return FillDict(def_dict, hvparams, skip_keys=skip_keys)
1768
1769 def FillHV(self, instance, skip_globals=False):
1770 """Fill an instance's hvparams dict with cluster defaults.
1771
1772 @type instance: L{objects.Instance}
1773 @param instance: the instance parameter to fill
1774 @type skip_globals: boolean
1775 @param skip_globals: if True, the global hypervisor parameters will
1776 not be filled
1777 @rtype: dict
1778 @return: a copy of the instance's hvparams with missing keys filled from
1779 the cluster defaults
1780
1781 """
1782 return self.SimpleFillHV(instance.hypervisor, instance.os,
1783 instance.hvparams, skip_globals)
1784
1785 def SimpleFillBE(self, beparams):
1786 """Fill a given beparams dict with cluster defaults.
1787
1788 @type beparams: dict
1789 @param beparams: the dict to fill
1790 @rtype: dict
1791 @return: a copy of the passed in beparams with missing keys filled
1792 from the cluster defaults
1793
1794 """
1795 return FillDict(self.beparams.get(constants.PP_DEFAULT, {}), beparams)
1796
1797 def FillBE(self, instance):
1798 """Fill an instance's beparams dict with cluster defaults.
1799
1800 @type instance: L{objects.Instance}
1801 @param instance: the instance parameter to fill
1802 @rtype: dict
1803 @return: a copy of the instance's beparams with missing keys filled from
1804 the cluster defaults
1805
1806 """
1807 return self.SimpleFillBE(instance.beparams)
1808
1809 def SimpleFillNIC(self, nicparams):
1810 """Fill a given nicparams dict with cluster defaults.
1811
1812 @type nicparams: dict
1813 @param nicparams: the dict to fill
1814 @rtype: dict
1815 @return: a copy of the passed in nicparams with missing keys filled
1816 from the cluster defaults
1817
1818 """
1819 return FillDict(self.nicparams.get(constants.PP_DEFAULT, {}), nicparams)
1820
1821 def SimpleFillOS(self, os_name, os_params):
1822 """Fill an instance's osparams dict with cluster defaults.
1823
1824 @type os_name: string
1825 @param os_name: the OS name to use
1826 @type os_params: dict
1827 @param os_params: the dict to fill with default values
1828 @rtype: dict
1829 @return: a copy of the instance's osparams with missing keys filled from
1830 the cluster defaults
1831
1832 """
1833 name_only = os_name.split("+", 1)[0]
1834 # base OS
1835 result = self.osparams.get(name_only, {})
1836 # OS with variant
1837 result = FillDict(result, self.osparams.get(os_name, {}))
1838 # specified params
1839 return FillDict(result, os_params)
1840
1841 @staticmethod
1842 def SimpleFillHvState(hv_state):
1843 """Fill an hv_state sub dict with cluster defaults.
1844
1845 """
1846 return FillDict(constants.HVST_DEFAULTS, hv_state)
1847
1848 @staticmethod
1849 def SimpleFillDiskState(disk_state):
1850 """Fill an disk_state sub dict with cluster defaults.
1851
1852 """
1853 return FillDict(constants.DS_DEFAULTS, disk_state)
1854
1855 def FillND(self, node, nodegroup):
1856 """Return filled out ndparams for L{objects.NodeGroup} and L{objects.Node}
1857
1858 @type node: L{objects.Node}
1859 @param node: A Node object to fill
1860 @type nodegroup: L{objects.NodeGroup}
1861 @param nodegroup: The node group the node belongs to
1862 @return: a copy of the node's ndparams with defaults filled
1863
1864 """
1865 return self.SimpleFillND(nodegroup.FillND(node))
1866
1867 def SimpleFillND(self, ndparams):
1868 """Fill a given ndparams dict with defaults.
1869
1870 @type ndparams: dict
1871 @param ndparams: the dict to fill
1872 @rtype: dict
1873 @return: a copy of the passed in ndparams with missing keys filled
1874 from the cluster defaults
1875
1876 """
1877 return FillDict(self.ndparams, ndparams)
1878
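# Illustrative sketch, not part of the original module: node parameters are
# layered cluster -> node group -> node. Assuming `node` and `group` objects
# from the configuration:
#
#   nd_full = cluster.FillND(node, group)
#   # equivalent to cluster.SimpleFillND(group.FillND(node)): the node's own
#   # ndparams win over the group's, which win over cluster.ndparams.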
1879 def SimpleFillIPolicy(self, ipolicy):
1880 """ Fill instance policy dict with defaults.
1881
1882 @type ipolicy: dict
1883 @param ipolicy: the dict to fill
1884 @rtype: dict
1885 @return: a copy of passed ipolicy with missing keys filled from
1886 the cluster defaults
1887
1888 """
1889 return FillIPolicy(self.ipolicy, ipolicy)
1890
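# Illustrative sketch, not part of the original module: an ipolicy override
# only needs the keys it changes; everything else is completed from
# self.ipolicy by the module-level FillIPolicy helper. The value below is
# made up.
#
#   ipol_full = cluster.SimpleFillIPolicy({constants.IPOLICY_VCPU_RATIO: 3.0})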
1891
1892 class BlockDevStatus(ConfigObject):
1893 """Config object representing the status of a block device."""
1894 __slots__ = [
1895 "dev_path",
1896 "major",
1897 "minor",
1898 "sync_percent",
1899 "estimated_time",
1900 "is_degraded",
1901 "ldisk_status",
1902 ]
1903
1904
1905 class ImportExportStatus(ConfigObject):
1906 """Config object representing the status of an import or export."""
1907 __slots__ = [
1908 "recent_output",
1909 "listen_port",
1910 "connected",
1911 "progress_mbytes",
1912 "progress_throughput",
1913 "progress_eta",
1914 "progress_percent",
1915 "exit_status",
1916 "error_message",
1917 ] + _TIMESTAMPS
1918
1919
1920 class ImportExportOptions(ConfigObject):
1921 """Options for import/export daemon
1922
1923 @ivar key_name: X509 key name (None for cluster certificate)
1924 @ivar ca_pem: Remote peer CA in PEM format (None for cluster certificate)
1925 @ivar compress: Compression method (one of L{constants.IEC_ALL})
1926 @ivar magic: Used to ensure the connection goes to the right disk
1927 @ivar ipv6: Whether to use IPv6
1928 @ivar connect_timeout: Number of seconds for establishing connection
1929
1930 """
1931 __slots__ = [
1932 "key_name",
1933 "ca_pem",
1934 "compress",
1935 "magic",
1936 "ipv6",
1937 "connect_timeout",
1938 ]
1939
1940
1941 class ConfdRequest(ConfigObject):
1942 """Object holding a confd request.
1943
1944 @ivar protocol: confd protocol version
1945 @ivar type: confd query type
1946 @ivar query: query request
1947 @ivar rsalt: requested reply salt
1948
1949 """
1950 __slots__ = [
1951 "protocol",
1952 "type",
1953 "query",
1954 "rsalt",
1955 ]
1956
1957
1958 class ConfdReply(ConfigObject):
1959 """Object holding a confd reply.
1960
1961 @ivar protocol: confd protocol version
1962 @ivar status: reply status code (ok, error)
1963 @ivar answer: confd query reply
1964 @ivar serial: configuration serial number
1965
1966 """
1967 __slots__ = [
1968 "protocol",
1969 "status",
1970 "answer",
1971 "serial",
1972 ]
1973
1974
1975 class QueryFieldDefinition(ConfigObject):
1976 """Object holding a query field definition.
1977
1978 @ivar name: Field name
1979 @ivar title: Human-readable title
1980 @ivar kind: Field type
1981 @ivar doc: Human-readable description
1982
1983 """
1984 __slots__ = [
1985 "name",
1986 "title",
1987 "kind",
1988 "doc",
1989 ]
1990
1991
1992 class _QueryResponseBase(ConfigObject):
1993 __slots__ = [
1994 "fields",
1995 ]
1996
1997 def ToDict(self):
1998 """Custom function for serializing.
1999
2000 """
2001 mydict = super(_QueryResponseBase, self).ToDict()
2002 mydict["fields"] = outils.ContainerToDicts(mydict["fields"])
2003 return mydict
2004
2005 @classmethod
2006 def FromDict(cls, val):
2007 """Custom function for de-serializing.
2008
2009 """
2010 obj = super(_QueryResponseBase, cls).FromDict(val)
2011 obj.fields = \
2012 outils.ContainerFromDicts(obj.fields, list, QueryFieldDefinition)
2013 return obj
2014
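# Illustrative sketch, not part of the original module: the custom
# ToDict/FromDict pair above ensures that the `fields` attribute survives a
# serialization round trip as QueryFieldDefinition objects instead of plain
# dicts. Assuming the usual keyword-argument ConfigObject constructor and
# the QueryResponse subclass defined below:
#
#   resp = QueryResponse(fields=[QueryFieldDefinition(name="name")], data=[])
#   resp2 = QueryResponse.FromDict(resp.ToDict())
#   # resp2.fields[0] is again a QueryFieldDefinition instance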
2015
2016 class QueryResponse(_QueryResponseBase):
2017 """Object holding the response to a query.
2018
2019 @ivar fields: List of L{QueryFieldDefinition} objects
2020 @ivar data: Requested data
2021
2022 """
2023 __slots__ = [
2024 "data",
2025 ]
2026
2027
2028 class QueryFieldsRequest(ConfigObject):
2029 """Object holding a request for querying available fields.
2030
2031 """
2032 __slots__ = [
2033 "what",
2034 "fields",
2035 ]
2036
2037
2038 class QueryFieldsResponse(_QueryResponseBase):
2039 """Object holding the response to a query for fields.
2040
2041 @ivar fields: List of L{QueryFieldDefinition} objects
2042
2043 """
2044 __slots__ = []
2045
2046
2047 class MigrationStatus(ConfigObject):
2048 """Object holding the status of a migration.
2049
2050 """
2051 __slots__ = [
2052 "status",
2053 "transferred_ram",
2054 "total_ram",
2055 ]
2056
2057
2058 class InstanceConsole(ConfigObject):
2059 """Object describing how to access the console of an instance.
2060
2061 """
2062 __slots__ = [
2063 "instance",
2064 "kind",
2065 "message",
2066 "host",
2067 "port",
2068 "user",
2069 "command",
2070 "display",
2071 ]
2072
2073 def Validate(self):
2074 """Validates contents of this object.
2075
2076 """
2077 assert self.kind in constants.CONS_ALL, "Unknown console type"
2078 assert self.instance, "Missing instance name"
2079 assert self.message or self.kind in [constants.CONS_SSH,
2080 constants.CONS_SPICE,
2081 constants.CONS_VNC]
2082 assert self.host or self.kind == constants.CONS_MESSAGE
2083 assert self.port or self.kind in [constants.CONS_MESSAGE,
2084 constants.CONS_SSH]
2085 assert self.user or self.kind in [constants.CONS_MESSAGE,
2086 constants.CONS_SPICE,
2087 constants.CONS_VNC]
2088 assert self.command or self.kind in [constants.CONS_MESSAGE,
2089 constants.CONS_SPICE,
2090 constants.CONS_VNC]
2091 assert self.display or self.kind in [constants.CONS_MESSAGE,
2092 constants.CONS_SPICE,
2093 constants.CONS_SSH]
2094 return True
2095
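# Illustrative sketch, not part of the original module: per the asserts
# above, an SSH console needs instance, host, user and command, while port,
# display and message may stay unset. All values below are made up and the
# keyword-argument ConfigObject constructor is assumed.
#
#   cons = InstanceConsole(instance="inst1.example.com",
#                          kind=constants.CONS_SSH,
#                          host="node1.example.com", user="root",
#                          command=["xl", "console", "inst1.example.com"])
#   cons.Validate()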
2096
2097 class Network(TaggableObject):
2098 """Object representing a network definition for ganeti.
2099
2100 """
2101 __slots__ = [
2102 "name",
2103 "serial_no",
2104 "mac_prefix",
2105 "network",
2106 "network6",
2107 "gateway",
2108 "gateway6",
2109 "reservations",
2110 "ext_reservations",
2111 ] + _TIMESTAMPS + _UUID
2112
2113 def HooksDict(self, prefix=""):
2114 """Export a dictionary used by hooks with a network's information.
2115
2116 @type prefix: string
2117 @param prefix: Prefix to prepend to the dict entries
2118
2119 """
2120 result = {
2121 "%sNETWORK_NAME" % prefix: self.name,
2122 "%sNETWORK_UUID" % prefix: self.uuid,
2123 "%sNETWORK_TAGS" % prefix: " ".join(self.GetTags()),
2124 }
2125 if self.network:
2126 result["%sNETWORK_SUBNET" % prefix] = self.network
2127 if self.gateway:
2128 result["%sNETWORK_GATEWAY" % prefix] = self.gateway
2129 if self.network6:
2130 result["%sNETWORK_SUBNET6" % prefix] = self.network6
2131 if self.gateway6:
2132 result["%sNETWORK_GATEWAY6" % prefix] = self.gateway6
2133 if self.mac_prefix:
2134 result["%sNETWORK_MAC_PREFIX" % prefix] = self.mac_prefix
2135
2136 return result
2137
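# Illustrative sketch, not part of the original module: for a network `net`
# attached to a NIC, a call such as
#
#   env = net.HooksDict("NIC0_")
#
# yields NIC0_NETWORK_NAME, NIC0_NETWORK_UUID and NIC0_NETWORK_TAGS, plus
# NIC0_NETWORK_SUBNET/GATEWAY(6) and NIC0_NETWORK_MAC_PREFIX for whichever
# of those fields are set. The "NIC0_" prefix is only an example.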
2138 @classmethod
2139 def FromDict(cls, val):
2140 """Custom function for networks.
2141
2142 Remove deprecated network_type and family.
2143
2144 """
2145 if "network_type" in val:
2146 del val["network_type"]
2147 if "family" in val:
2148 del val["family"]
2149 obj = super(Network, cls).FromDict(val)
2150 return obj
2151
2152
2153 class SerializableConfigParser(ConfigParser.SafeConfigParser):
2154 """Simple wrapper over ConfigParse that allows serialization.
2155
2156 This class is basically ConfigParser.SafeConfigParser with two
2157 additional methods that allow it to serialize/unserialize to/from a
2158 buffer.
2159
2160 """
2161 def Dumps(self):
2162 """Dump this instance and return the string representation."""
2163 buf = StringIO()
2164 self.write(buf)
2165 return buf.getvalue()
2166
2167 @classmethod
2168 def Loads(cls, data):
2169 """Load data from a string."""
2170 buf = StringIO(data)
2171 cfp = cls()
2172 cfp.readfp(buf)
2173 return cfp
2174
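# Illustrative sketch, not part of the original module: round-tripping a
# parser through a plain string. The section and option names are made up.
#
#   cfp = SerializableConfigParser()
#   cfp.add_section("static")
#   cfp.set("static", "master_node", "node1.example.com")
#   text = cfp.Dumps()
#   restored = SerializableConfigParser.Loads(text)
#   assert restored.get("static", "master_node") == "node1.example.com"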
2175
2176 class LvmPvInfo(ConfigObject):
2177 """Information about an LVM physical volume (PV).
2178
2179 @type name: string
2180 @ivar name: name of the PV
2181 @type vg_name: string
2182 @ivar vg_name: name of the volume group containing the PV
2183 @type size: float
2184 @ivar size: size of the PV in MiB
2185 @type free: float
2186 @ivar free: free space in the PV, in MiB
2187 @type attributes: string
2188 @ivar attributes: PV attributes
2189 @type lv_list: list of strings
2190 @ivar lv_list: names of the LVs hosted on the PV
2191 """
2192 __slots__ = [
2193 "name",
2194 "vg_name",
2195 "size",
2196 "free",
2197 "attributes",
2198 "lv_list"
2199 ]
2200
2201 def IsEmpty(self):
2202 """Is this PV empty?
2203
2204 """
2205 return self.size <= (self.free + 1)
2206
2207 def IsAllocatable(self):
2208 """Is this PV allocatable?
2209
2210 """
2211 return ("a" in self.attributes)