# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Transportable objects for Ganeti.

This module provides small, mostly data-only objects which are safe to
pass to and from external parties.

"""

# pylint: disable=E0203,W0201,R0902

# E0203: Access to member %r before its definition, since we use
# objects.py which doesn't explicitly initialise its members

# W0201: Attribute '%s' defined outside __init__

# R0902: Allow instances of these objects to have more than 20 attributes

import ConfigParser
import re
import copy
import logging
import time
from cStringIO import StringIO

from ganeti import errors
from ganeti import constants
from ganeti import netutils
from ganeti import outils
from ganeti import utils

from socket import AF_INET


__all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
           "OS", "Node", "NodeGroup", "Cluster", "FillDict", "Network"]

_TIMESTAMPS = ["ctime", "mtime"]
_UUID = ["uuid"]


def FillDict(defaults_dict, custom_dict, skip_keys=None):
  """Basic function to apply settings on top a default dict.

  @type defaults_dict: dict
  @param defaults_dict: dictionary holding the default values
  @type custom_dict: dict
  @param custom_dict: dictionary holding customized value
  @type skip_keys: list
  @param skip_keys: which keys not to fill
  @rtype: dict
  @return: dict with the 'full' values

  """
  ret_dict = copy.deepcopy(defaults_dict)
  ret_dict.update(custom_dict)
  if skip_keys:
    for k in skip_keys:
      if k in ret_dict:
        del ret_dict[k]
  return ret_dict


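# Example usage (illustrative only; the dictionaries below are made-up values,
# not real Ganeti parameters):
#
#   >>> FillDict({"a": 1, "b": 2}, {"b": 3})
#   {'a': 1, 'b': 3}
#   >>> FillDict({"a": 1, "b": 2}, {"b": 3}, skip_keys=["b"])
#   {'a': 1}

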
def FillIPolicy(default_ipolicy, custom_ipolicy):
  """Fills an instance policy with defaults.

  """
  assert frozenset(default_ipolicy.keys()) == constants.IPOLICY_ALL_KEYS
  ret_dict = copy.deepcopy(custom_ipolicy)
  for key in default_ipolicy:
    if key not in ret_dict:
      ret_dict[key] = copy.deepcopy(default_ipolicy[key])
    elif key == constants.ISPECS_STD:
      ret_dict[key] = FillDict(default_ipolicy[key], ret_dict[key])
  return ret_dict


def FillDiskParams(default_dparams, custom_dparams, skip_keys=None):
  """Fills the disk parameter defaults.

  @see: L{FillDict} for parameters and return value

  """
  assert frozenset(default_dparams.keys()) == constants.DISK_TEMPLATES

  return dict((dt, FillDict(default_dparams[dt], custom_dparams.get(dt, {}),
                            skip_keys=skip_keys))
              for dt in constants.DISK_TEMPLATES)


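# Example (illustrative sketch): if constants.DISK_TEMPLATES were exactly
# frozenset(["plain", "drbd"]), then
#
#   >>> FillDiskParams({"plain": {"stripes": 1}, "drbd": {"metavg": "xenvg"}},
#   ...                {"plain": {"stripes": 2}})
#
# would return one dict per template, where "plain" carries the customised
# stripes value (2) and "drbd" keeps its defaults untouched.

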
def UpgradeGroupedParams(target, defaults):
  """Update all groups for the target parameter.

  @type target: dict of dicts
  @param target: {group: {parameter: value}}
  @type defaults: dict
  @param defaults: default parameter values

  """
  if target is None:
    target = {constants.PP_DEFAULT: defaults}
  else:
    for group in target:
      target[group] = FillDict(defaults, target[group])
  return target


def UpgradeBeParams(target):
  """Update the be parameters dict to the new format.

  @type target: dict
  @param target: "be" parameters dict

  """
  if constants.BE_MEMORY in target:
    memory = target[constants.BE_MEMORY]
    target[constants.BE_MAXMEM] = memory
    target[constants.BE_MINMEM] = memory
    del target[constants.BE_MEMORY]


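# Example (illustrative): a legacy "be" dict that still uses the single
# memory key is rewritten in place to the max/min pair.
#
#   >>> be = {constants.BE_MEMORY: 512}
#   >>> UpgradeBeParams(be)
#   # be == {constants.BE_MAXMEM: 512, constants.BE_MINMEM: 512}

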
def UpgradeDiskParams(diskparams):
  """Upgrade the disk parameters.

  @type diskparams: dict
  @param diskparams: disk parameters to upgrade
  @rtype: dict
  @return: the upgraded disk parameters dict

  """
  result = FillDiskParams(constants.DISK_DT_DEFAULTS, diskparams)
  return result


def UpgradeNDParams(ndparams):
  """Upgrade ndparams structure.

  @type ndparams: dict
  @param ndparams: node parameters to upgrade
  @rtype: dict
  @return: the upgraded node parameters dict

  """
  if ndparams is None:
    ndparams = {}

  if (constants.ND_OOB_PROGRAM in ndparams and
      ndparams[constants.ND_OOB_PROGRAM] is None):
    # will be reset by the line below
    del ndparams[constants.ND_OOB_PROGRAM]
  return FillDict(constants.NDC_DEFAULTS, ndparams)


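# Example (illustrative): a node parameter dict whose OOB program was stored
# as None has that entry dropped first, so the returned dict carries the
# value from constants.NDC_DEFAULTS instead of staying None.
#
#   >>> upgraded = UpgradeNDParams({constants.ND_OOB_PROGRAM: None})
#   >>> upgraded[constants.ND_OOB_PROGRAM] is None
#   # False, unless the cluster-wide default itself is None

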
def MakeEmptyIPolicy():
  """Create empty IPolicy dictionary.

  """
  return {}


class ConfigObject(outils.ValidatedSlots):
  """A generic config object.

  It has the following properties:

    - provides somewhat safe recursive unpickling and pickling for its classes
    - unset attributes which are defined in slots are always returned
      as None instead of raising an error

  Classes derived from this must always declare __slots__ (we use many
  config objects and the memory reduction is useful)

  """
  __slots__ = []

  def __getattr__(self, name):
    if name not in self.GetAllSlots():
      raise AttributeError("Invalid object attribute %s.%s" %
                           (type(self).__name__, name))
    return None

  def __setstate__(self, state):
    slots = self.GetAllSlots()
    for name in state:
      if name in slots:
        setattr(self, name, state[name])

  def Validate(self):
    """Validates the slots.

    """

  def ToDict(self):
    """Convert to a dict holding only standard python types.

    The generic routine just dumps all of this object's attributes in
    a dict. It does not work if the class has children who are
    ConfigObjects themselves (e.g. the nics list in an Instance), in
    which case the object should subclass the function in order to
    make sure all objects returned are only standard python types.

    """
    result = {}
    for name in self.GetAllSlots():
      value = getattr(self, name, None)
      if value is not None:
        result[name] = value
    return result

  __getstate__ = ToDict

  @classmethod
  def FromDict(cls, val):
    """Create an object from a dictionary.

    This generic routine takes a dict, instantiates a new instance of
    the given class, and sets attributes based on the dict content.

    As for `ToDict`, this does not work if the class has children
    who are ConfigObjects themselves (e.g. the nics list in an
    Instance), in which case the object should subclass the function
    and alter the objects.

    """
    if not isinstance(val, dict):
      raise errors.ConfigurationError("Invalid object passed to FromDict:"
                                      " expected dict, got %s" % type(val))
    val_str = dict([(str(k), v) for k, v in val.iteritems()])
    obj = cls(**val_str)  # pylint: disable=W0142
    return obj

  def Copy(self):
    """Makes a deep copy of the current object and its children.

    """
    dict_form = self.ToDict()
    clone_obj = self.__class__.FromDict(dict_form)
    return clone_obj

  def __repr__(self):
    """Implement __repr__ for ConfigObjects."""
    return repr(self.ToDict())

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    This method will be called at configuration load time, and its
    implementation will be object dependent.

    """


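# Example (illustrative sketch; _Demo is a hypothetical subclass used only to
# show the ToDict/FromDict round trip, relying on outils.ValidatedSlots
# accepting slot values as keyword arguments):
#
#   >>> class _Demo(ConfigObject):
#   ...   __slots__ = ["alpha", "beta"]
#   >>> demo = _Demo(alpha=1)
#   >>> demo.beta is None       # unset slots read back as None
#   True
#   >>> _Demo.FromDict(demo.ToDict()).alpha
#   1

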
class TaggableObject(ConfigObject):
  """A generic class supporting tags.

  """
  __slots__ = ["tags"]
  VALID_TAG_RE = re.compile("^[\w.+*/:@-]+$")

  @classmethod
  def ValidateTag(cls, tag):
    """Check if a tag is valid.

    If the tag is invalid, an errors.TagError will be raised. The
    function has no return value.

    """
    if not isinstance(tag, basestring):
      raise errors.TagError("Invalid tag type (not a string)")
    if len(tag) > constants.MAX_TAG_LEN:
      raise errors.TagError("Tag too long (>%d characters)" %
                            constants.MAX_TAG_LEN)
    if not tag:
      raise errors.TagError("Tags cannot be empty")
    if not cls.VALID_TAG_RE.match(tag):
      raise errors.TagError("Tag contains invalid characters")

  def GetTags(self):
    """Return the tags list.

    """
    tags = getattr(self, "tags", None)
    if tags is None:
      tags = self.tags = set()
    return tags

  def AddTag(self, tag):
    """Add a new tag.

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    if len(tags) >= constants.MAX_TAGS_PER_OBJ:
      raise errors.TagError("Too many tags")
    self.GetTags().add(tag)

  def RemoveTag(self, tag):
    """Remove a tag.

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    try:
      tags.remove(tag)
    except KeyError:
      raise errors.TagError("Tag not found")

  def ToDict(self):
    """Taggable-object-specific conversion to standard python types.

    This replaces the tags set with a list.

    """
    bo = super(TaggableObject, self).ToDict()

    tags = bo.get("tags", None)
    if isinstance(tags, set):
      bo["tags"] = list(tags)
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for instances.

    """
    obj = super(TaggableObject, cls).FromDict(val)
    if hasattr(obj, "tags") and isinstance(obj.tags, list):
      obj.tags = set(obj.tags)
    return obj


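# Example (illustrative; _TaggedDemo is a hypothetical subclass):
#
#   >>> class _TaggedDemo(TaggableObject):
#   ...   __slots__ = []
#   >>> obj = _TaggedDemo()
#   >>> obj.AddTag("role:web")
#   >>> sorted(obj.GetTags())
#   ['role:web']
#   >>> obj.AddTag("bad tag")   # spaces are rejected by VALID_TAG_RE
#   Traceback (most recent call last):
#     ...
#   TagError: Tag contains invalid characters

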
class MasterNetworkParameters(ConfigObject):
  """Network configuration parameters for the master

  @ivar name: master name
  @ivar ip: master IP
  @ivar netmask: master netmask
  @ivar netdev: master network device
  @ivar ip_family: master IP family

  """
  __slots__ = [
    "name",
    "ip",
    "netmask",
    "netdev",
    "ip_family",
    ]


class ConfigData(ConfigObject):
  """Top-level config object."""

  def ToDict(self):
    """Custom function for top-level config data.

    This just replaces the list of instances, nodes and the cluster
    with standard python types.

    """
    mydict = super(ConfigData, self).ToDict()
    mydict["cluster"] = mydict["cluster"].ToDict()
    for key in "nodes", "instances", "nodegroups", "networks":
      mydict[key] = outils.ContainerToDicts(mydict[key])
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for top-level config data

    """
    obj = super(ConfigData, cls).FromDict(val)
    obj.cluster = Cluster.FromDict(obj.cluster)
    obj.nodes = outils.ContainerFromDicts(obj.nodes, dict, Node)
    obj.instances = \
      outils.ContainerFromDicts(obj.instances, dict, Instance)
    obj.nodegroups = \
      outils.ContainerFromDicts(obj.nodegroups, dict, NodeGroup)
    obj.networks = outils.ContainerFromDicts(obj.networks, dict, Network)
    return obj

  def HasAnyDiskOfType(self, dev_type):
    """Check if there is at least one disk of the given type in the
    configuration.

    @type dev_type: L{constants.LDS_BLOCK}
    @param dev_type: the type to look for
    @rtype: boolean
    @return: boolean indicating if a disk of the given type was found or not

    """
    for instance in self.instances.values():
      for disk in instance.disks:
        if disk.IsBasedOnDiskType(dev_type):
          return True
    return False

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    self.cluster.UpgradeConfig()
    for node in self.nodes.values():
      node.UpgradeConfig()
    for instance in self.instances.values():
      instance.UpgradeConfig()
    if self.nodegroups is None:
      self.nodegroups = {}
    for nodegroup in self.nodegroups.values():
      nodegroup.UpgradeConfig()
    if self.cluster.drbd_usermode_helper is None:
      # To decide if we set a helper, let's check if at least one instance has
      # a DRBD disk. This does not cover all the possible scenarios but it
      # gives a good approximation.
      if self.HasAnyDiskOfType(constants.LD_DRBD8):
        self.cluster.drbd_usermode_helper = constants.DEFAULT_DRBD_HELPER
    if self.networks is None:
      self.networks = {}
    for network in self.networks.values():
      network.UpgradeConfig()
    self._UpgradeEnabledDiskTemplates()

  def _UpgradeEnabledDiskTemplates(self):
    """Upgrade the cluster's enabled disk templates by inspecting the
    currently enabled and/or used disk templates.

    """
    # enabled_disk_templates in the cluster config were introduced in 2.8.
    # Remove this code once upgrading from earlier versions is deprecated.
    if not self.cluster.enabled_disk_templates:
      template_set = \
        set([inst.disk_template for inst in self.instances.values()])
      # Add drbd and plain, if lvm is enabled (by specifying a volume group)
      if self.cluster.volume_group_name:
        template_set.add(constants.DT_DRBD8)
        template_set.add(constants.DT_PLAIN)
      # FIXME: Adapt this when dis/enabling at configure time is removed.
      # Enable 'file' and 'sharedfile', if they are enabled, even though they
      # might currently not be used.
      if constants.ENABLE_FILE_STORAGE:
        template_set.add(constants.DT_FILE)
      if constants.ENABLE_SHARED_FILE_STORAGE:
        template_set.add(constants.DT_SHARED_FILE)
      # Set enabled_disk_templates to the inferred disk templates. Order them
      # according to a preference list that is based on Ganeti's history of
      # supported disk templates.
      self.cluster.enabled_disk_templates = []
      for preferred_template in constants.DISK_TEMPLATE_PREFERENCE:
        if preferred_template in template_set:
          self.cluster.enabled_disk_templates.append(preferred_template)
          template_set.remove(preferred_template)
      self.cluster.enabled_disk_templates.extend(list(template_set))


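# Example (illustrative): if the instances in the configuration use the
# templates {"file", "plain", "drbd"} and constants.DISK_TEMPLATE_PREFERENCE
# listed "drbd" before "plain" before "file", the inferred value of
# cluster.enabled_disk_templates would be ["drbd", "plain", "file"]; any
# inferred template not named in the preference list is appended afterwards
# in arbitrary set order.

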
class NIC(ConfigObject):
  """Config object representing a network card."""
  __slots__ = ["name", "mac", "ip", "network", "nicparams", "netinfo"] + _UUID

  @classmethod
  def CheckParameterSyntax(cls, nicparams):
    """Check the given parameters for validity.

    @type nicparams: dict
    @param nicparams: dictionary with parameter names/value
    @raise errors.ConfigurationError: when a parameter is not valid

    """
    mode = nicparams[constants.NIC_MODE]
    if (mode not in constants.NIC_VALID_MODES and
        mode != constants.VALUE_AUTO):
      raise errors.ConfigurationError("Invalid NIC mode '%s'" % mode)

    if (mode == constants.NIC_MODE_BRIDGED and
        not nicparams[constants.NIC_LINK]):
      raise errors.ConfigurationError("Missing bridged NIC link")


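# Example (illustrative; "br0" is a made-up link name):
#
#   >>> NIC.CheckParameterSyntax({constants.NIC_MODE: constants.NIC_MODE_BRIDGED,
#   ...                           constants.NIC_LINK: "br0"})   # valid, returns None
#   >>> NIC.CheckParameterSyntax({constants.NIC_MODE: constants.NIC_MODE_BRIDGED,
#   ...                           constants.NIC_LINK: ""})
#   Traceback (most recent call last):
#     ...
#   ConfigurationError: Missing bridged NIC link

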
class Disk(ConfigObject):
  """Config object representing a block device."""
  __slots__ = ["name", "dev_type", "logical_id", "physical_id",
               "children", "iv_name", "size", "mode", "params"] + _UUID

  def CreateOnSecondary(self):
    """Test if this device needs to be created on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def AssembleOnSecondary(self):
    """Test if this device needs to be assembled on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def OpenOnSecondary(self):
    """Test if this device needs to be opened on a secondary node."""
    return self.dev_type in (constants.LD_LV,)

  def StaticDevPath(self):
    """Return the device path if this device type has a static one.

    Some devices (LVM for example) live always at the same /dev/ path,
    irrespective of their status. For such devices, we return this
    path, for others we return None.

    @warning: The path returned is not a normalized pathname; callers
      should check that it is a valid path.

    """
    if self.dev_type == constants.LD_LV:
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
    elif self.dev_type == constants.LD_BLOCKDEV:
      return self.logical_id[1]
    elif self.dev_type == constants.LD_RBD:
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
    return None

  def ChildrenNeeded(self):
    """Compute the needed number of children for activation.

    This method will return either -1 (all children) or a positive
    number denoting the minimum number of children needed for
    activation (only mirrored devices will usually return >=0).

    Currently, only DRBD8 supports diskless activation (therefore we
    return 0), for all other we keep the previous semantics and return
    -1.

    """
    if self.dev_type == constants.LD_DRBD8:
      return 0
    return -1

  def IsBasedOnDiskType(self, dev_type):
    """Check if the disk or its children are based on the given type.

    @type dev_type: L{constants.LDS_BLOCK}
    @param dev_type: the type to look for
    @rtype: boolean
    @return: boolean indicating if a device of the given type was found or not

    """
    if self.children:
      for child in self.children:
        if child.IsBasedOnDiskType(dev_type):
          return True
    return self.dev_type == dev_type

  def GetNodes(self, node):
    """This function returns the nodes this device lives on.

    Given the node on which the parent of the device lives (or, in
    case of a top-level device, the primary node of the device's
    instance), this function will return a list of nodes on which this
    device needs to (or can) be assembled.

    """
    if self.dev_type in [constants.LD_LV, constants.LD_FILE,
                         constants.LD_BLOCKDEV, constants.LD_RBD,
                         constants.LD_EXT]:
      result = [node]
    elif self.dev_type in constants.LDS_DRBD:
      result = [self.logical_id[0], self.logical_id[1]]
      if node not in result:
        raise errors.ConfigurationError("DRBD device passed unknown node")
    else:
      raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
    return result

  def ComputeNodeTree(self, parent_node):
    """Compute the node/disk tree for this disk and its children.

    This method, given the node on which the parent disk lives, will
    return the list of all (node, disk) pairs which describe the disk
    tree in the most compact way. For example, a drbd/lvm stack
    will be returned as (primary_node, drbd) and (secondary_node, drbd)
    which represents all the top-level devices on the nodes.

    """
    my_nodes = self.GetNodes(parent_node)
    result = [(node, self) for node in my_nodes]
    if not self.children:
      # leaf device
      return result
    for node in my_nodes:
      for child in self.children:
        child_result = child.ComputeNodeTree(node)
        if len(child_result) == 1:
          # child (and all its descendants) is simple, doesn't split
          # over multiple hosts, so we don't need to describe it, our
          # own entry for this node describes it completely
          continue
        else:
          # check if child nodes differ from my nodes; note that
          # subdisk can differ from the child itself, and be instead
          # one of its descendants
          for subnode, subdisk in child_result:
            if subnode not in my_nodes:
              result.append((subnode, subdisk))
            # otherwise child is under our own node, so we ignore this
            # entry (but probably the other results in the list will
            # be different)
    return result

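  # Example (illustrative): for a DRBD8 disk mirrored between "node1" and
  # "node2" whose two LV children live on the same nodes, ComputeNodeTree
  # called with "node1" returns only
  #   [("node1", <the DRBD disk>), ("node2", <the DRBD disk>)]
  # because each child's own tree is a single (node, disk) pair and is thus
  # already covered by the parent's entry for that node.
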
  def ComputeGrowth(self, amount):
    """Compute the per-VG growth requirements.

    This only works for VG-based disks.

    @type amount: integer
    @param amount: the desired increase in (user-visible) disk space
    @rtype: dict
    @return: a dictionary of volume-groups and the required size

    """
    if self.dev_type == constants.LD_LV:
      return {self.logical_id[0]: amount}
    elif self.dev_type == constants.LD_DRBD8:
      if self.children:
        return self.children[0].ComputeGrowth(amount)
      return {}
    else:
      # Other disk types do not require VG space
      return {}

  def RecordGrow(self, amount):
    """Update the size of this disk after growth.

    This method recurses over the disk's children and updates their
    size correspondingly. The method needs to be kept in sync with the
    actual algorithms from bdev.

    """
    if self.dev_type in (constants.LD_LV, constants.LD_FILE,
                         constants.LD_RBD, constants.LD_EXT):
      self.size += amount
    elif self.dev_type == constants.LD_DRBD8:
      if self.children:
        self.children[0].RecordGrow(amount)
      self.size += amount
    else:
      raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
                                   " disk type %s" % self.dev_type)

  def Update(self, size=None, mode=None):
    """Apply changes to size and mode.

    """
    if self.dev_type == constants.LD_DRBD8:
      if self.children:
        self.children[0].Update(size=size, mode=mode)
    else:
      assert not self.children

    if size is not None:
      self.size = size
    if mode is not None:
      self.mode = mode

  def UnsetSize(self):
    """Sets recursively the size to zero for the disk and its children.

    """
    if self.children:
      for child in self.children:
        child.UnsetSize()
    self.size = 0

  def SetPhysicalID(self, target_node, nodes_ip):
    """Convert the logical ID to the physical ID.

    This is used only for drbd, which needs ip/port configuration.

    The routine descends down and updates its children also, because
    this helps when only the top device is passed to the remote node.

    Arguments:
      - target_node: the node we wish to configure for
      - nodes_ip: a mapping of node name to ip

    The target_node must exist in nodes_ip, and must be one of the
    nodes in the logical ID for each of the DRBD devices encountered
    in the disk tree.

    """
    if self.children:
      for child in self.children:
        child.SetPhysicalID(target_node, nodes_ip)

    if self.logical_id is None and self.physical_id is not None:
      return
    if self.dev_type in constants.LDS_DRBD:
      pnode, snode, port, pminor, sminor, secret = self.logical_id
      if target_node not in (pnode, snode):
        raise errors.ConfigurationError("DRBD device not knowing node %s" %
                                        target_node)
      pnode_ip = nodes_ip.get(pnode, None)
      snode_ip = nodes_ip.get(snode, None)
      if pnode_ip is None or snode_ip is None:
        raise errors.ConfigurationError("Can't find primary or secondary node"
                                        " for %s" % str(self))
      p_data = (pnode_ip, port)
      s_data = (snode_ip, port)
      if pnode == target_node:
        self.physical_id = p_data + s_data + (pminor, secret)
      else: # it must be secondary, we tested above
        self.physical_id = s_data + p_data + (sminor, secret)
    else:
      self.physical_id = self.logical_id
    return

  def ToDict(self):
    """Disk-specific conversion to standard python types.

    This replaces the children lists of objects with lists of
    standard python types.

    """
    bo = super(Disk, self).ToDict()

    for attr in ("children",):
      alist = bo.get(attr, None)
      if alist:
        bo[attr] = outils.ContainerToDicts(alist)
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for Disks

    """
    obj = super(Disk, cls).FromDict(val)
    if obj.children:
      obj.children = outils.ContainerFromDicts(obj.children, list, Disk)
    if obj.logical_id and isinstance(obj.logical_id, list):
      obj.logical_id = tuple(obj.logical_id)
    if obj.physical_id and isinstance(obj.physical_id, list):
      obj.physical_id = tuple(obj.physical_id)
    if obj.dev_type in constants.LDS_DRBD:
      # we need a tuple of length six here
      if len(obj.logical_id) < 6:
        obj.logical_id += (None,) * (6 - len(obj.logical_id))
    return obj

  def __str__(self):
    """Custom str() formatter for disks.

    """
    if self.dev_type == constants.LD_LV:
      val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
    elif self.dev_type in constants.LDS_DRBD:
      node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
      val = "<DRBD8("
      if self.physical_id is None:
        phy = "unconfigured"
      else:
        phy = ("configured as %s:%s %s:%s" %
               (self.physical_id[0], self.physical_id[1],
                self.physical_id[2], self.physical_id[3]))

      val += ("hosts=%s/%d-%s/%d, port=%s, %s, " %
              (node_a, minor_a, node_b, minor_b, port, phy))
      if self.children and self.children.count(None) == 0:
        val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
      else:
        val += "no local storage"
    else:
      val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
             (self.dev_type, self.logical_id, self.physical_id, self.children))
    if self.iv_name is None:
      val += ", not visible"
    else:
      val += ", visible as /dev/%s" % self.iv_name
    if isinstance(self.size, int):
      val += ", size=%dm)>" % self.size
    else:
      val += ", size='%s')>" % (self.size,)
    return val

  def Verify(self):
    """Checks that this disk is correctly configured.

    """
    all_errors = []
    if self.mode not in constants.DISK_ACCESS_SET:
      all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
    return all_errors

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    if self.children:
      for child in self.children:
        child.UpgradeConfig()

    # FIXME: Make this configurable in Ganeti 2.7
    # add here config upgrade for this disk

    # If the file driver is empty, fill it up with the default value
    if self.dev_type == constants.LD_FILE and self.physical_id[0] is None:
      self.physical_id[0] = constants.FD_DEFAULT

  @staticmethod
  def ComputeLDParams(disk_template, disk_params):
    """Computes Logical Disk parameters from Disk Template parameters.

    @type disk_template: string
    @param disk_template: disk template, one of L{constants.DISK_TEMPLATES}
    @type disk_params: dict
    @param disk_params: disk template parameters;
                        dict(template_name -> parameters)
    @return: a list of dicts, one for each node of the disk hierarchy. Each dict
      contains the LD parameters of the node. The tree is flattened in-order.

    """
    if disk_template not in constants.DISK_TEMPLATES:
      raise errors.ProgrammerError("Unknown disk template %s" % disk_template)

    assert disk_template in disk_params

    result = []
    dt_params = disk_params[disk_template]
    if disk_template == constants.DT_DRBD8:
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_DRBD8], {
        constants.LDP_RESYNC_RATE: dt_params[constants.DRBD_RESYNC_RATE],
        constants.LDP_BARRIERS: dt_params[constants.DRBD_DISK_BARRIERS],
        constants.LDP_NO_META_FLUSH: dt_params[constants.DRBD_META_BARRIERS],
        constants.LDP_DEFAULT_METAVG: dt_params[constants.DRBD_DEFAULT_METAVG],
        constants.LDP_DISK_CUSTOM: dt_params[constants.DRBD_DISK_CUSTOM],
        constants.LDP_NET_CUSTOM: dt_params[constants.DRBD_NET_CUSTOM],
        constants.LDP_DYNAMIC_RESYNC: dt_params[constants.DRBD_DYNAMIC_RESYNC],
        constants.LDP_PLAN_AHEAD: dt_params[constants.DRBD_PLAN_AHEAD],
        constants.LDP_FILL_TARGET: dt_params[constants.DRBD_FILL_TARGET],
        constants.LDP_DELAY_TARGET: dt_params[constants.DRBD_DELAY_TARGET],
        constants.LDP_MAX_RATE: dt_params[constants.DRBD_MAX_RATE],
        constants.LDP_MIN_RATE: dt_params[constants.DRBD_MIN_RATE],
        }))

      # data LV
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], {
        constants.LDP_STRIPES: dt_params[constants.DRBD_DATA_STRIPES],
        }))

      # metadata LV
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], {
        constants.LDP_STRIPES: dt_params[constants.DRBD_META_STRIPES],
        }))

    elif disk_template in (constants.DT_FILE, constants.DT_SHARED_FILE):
      result.append(constants.DISK_LD_DEFAULTS[constants.LD_FILE])

    elif disk_template == constants.DT_PLAIN:
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], {
        constants.LDP_STRIPES: dt_params[constants.LV_STRIPES],
        }))

    elif disk_template == constants.DT_BLOCK:
      result.append(constants.DISK_LD_DEFAULTS[constants.LD_BLOCKDEV])

    elif disk_template == constants.DT_RBD:
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_RBD], {
        constants.LDP_POOL: dt_params[constants.RBD_POOL],
        }))

    elif disk_template == constants.DT_EXT:
      result.append(constants.DISK_LD_DEFAULTS[constants.LD_EXT])

    return result


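# Example (illustrative): computing the logical-disk parameters for a plain
# LVM template; the stripes value is a made-up number.
#
#   >>> ld_params = Disk.ComputeLDParams(constants.DT_PLAIN,
#   ...                                  {constants.DT_PLAIN:
#   ...                                   {constants.LV_STRIPES: 2}})
#   >>> ld_params[0][constants.LDP_STRIPES]
#   2

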
class InstancePolicy(ConfigObject):
  """Config object representing instance policy limits dictionary.

  Note that this object is not actually used in the config, it's just
  used as a placeholder for a few functions.

  """
  @classmethod
  def CheckParameterSyntax(cls, ipolicy, check_std):
    """ Check the instance policy for validity.

    @type ipolicy: dict
    @param ipolicy: dictionary with min/max/std specs and policies
    @type check_std: bool
    @param check_std: Whether to check std value or just assume compliance
    @raise errors.ConfigurationError: when the policy is not legal

    """
    InstancePolicy.CheckISpecSyntax(ipolicy, check_std)
    if constants.IPOLICY_DTS in ipolicy:
      InstancePolicy.CheckDiskTemplates(ipolicy[constants.IPOLICY_DTS])
    for key in constants.IPOLICY_PARAMETERS:
      if key in ipolicy:
        InstancePolicy.CheckParameter(key, ipolicy[key])
    wrong_keys = frozenset(ipolicy.keys()) - constants.IPOLICY_ALL_KEYS
    if wrong_keys:
      raise errors.ConfigurationError("Invalid keys in ipolicy: %s" %
                                      utils.CommaJoin(wrong_keys))

  @classmethod
  def _CheckIncompleteSpec(cls, spec, keyname):
    missing_params = constants.ISPECS_PARAMETERS - frozenset(spec.keys())
    if missing_params:
      msg = ("Missing instance specs parameters for %s: %s" %
             (keyname, utils.CommaJoin(missing_params)))
      raise errors.ConfigurationError(msg)

  @classmethod
  def CheckISpecSyntax(cls, ipolicy, check_std):
    """Check the instance policy specs for validity.

    @type ipolicy: dict
    @param ipolicy: dictionary with min/max/std specs
    @type check_std: bool
    @param check_std: Whether to check std value or just assume compliance
    @raise errors.ConfigurationError: when specs are not valid

    """
    if constants.ISPECS_MINMAX not in ipolicy:
      # Nothing to check
      return

    if check_std and constants.ISPECS_STD not in ipolicy:
      msg = "Missing key in ipolicy: %s" % constants.ISPECS_STD
      raise errors.ConfigurationError(msg)
    stdspec = ipolicy.get(constants.ISPECS_STD)
    if check_std:
      InstancePolicy._CheckIncompleteSpec(stdspec, constants.ISPECS_STD)

    if not ipolicy[constants.ISPECS_MINMAX]:
      raise errors.ConfigurationError("Empty minmax specifications")
    std_is_good = False
    for minmaxspecs in ipolicy[constants.ISPECS_MINMAX]:
      missing = constants.ISPECS_MINMAX_KEYS - frozenset(minmaxspecs.keys())
      if missing:
        msg = "Missing instance specification: %s" % utils.CommaJoin(missing)
        raise errors.ConfigurationError(msg)
      for (key, spec) in minmaxspecs.items():
        InstancePolicy._CheckIncompleteSpec(spec, key)

      spec_std_ok = True
      for param in constants.ISPECS_PARAMETERS:
        par_std_ok = InstancePolicy._CheckISpecParamSyntax(minmaxspecs, stdspec,
                                                           param, check_std)
        spec_std_ok = spec_std_ok and par_std_ok
      std_is_good = std_is_good or spec_std_ok
    if check_std and not std_is_good:
      raise errors.ConfigurationError("Invalid std specifications")

  @classmethod
  def _CheckISpecParamSyntax(cls, minmaxspecs, stdspec, name, check_std):
    """Check the instance policy specs for validity on a given key.

    We check if the instance specs make sense for a given key, that is
    if minmaxspecs[min][name] <= stdspec[name] <= minmaxspecs[max][name].

    @type minmaxspecs: dict
    @param minmaxspecs: dictionary with min and max instance spec
    @type stdspec: dict
    @param stdspec: dictionary with standard instance spec
    @type name: string
    @param name: what are the limits for
    @type check_std: bool
    @param check_std: Whether to check std value or just assume compliance
    @rtype: bool
    @return: C{True} when specs are valid, C{False} when standard spec for the
      given name is not valid
    @raise errors.ConfigurationError: when min/max specs for the given name
      are not valid

    """
    minspec = minmaxspecs[constants.ISPECS_MIN]
    maxspec = minmaxspecs[constants.ISPECS_MAX]
    min_v = minspec[name]
    max_v = maxspec[name]

    if min_v > max_v:
      err = ("Invalid specification of min/max values for %s: %s/%s" %
             (name, min_v, max_v))
      raise errors.ConfigurationError(err)
    elif check_std:
      std_v = stdspec.get(name, min_v)
      return std_v >= min_v and std_v <= max_v
    else:
      return True

  @classmethod
  def CheckDiskTemplates(cls, disk_templates):
    """Checks the disk templates for validity.

    """
    if not disk_templates:
      raise errors.ConfigurationError("Instance policy must contain" +
                                      " at least one disk template")
    wrong = frozenset(disk_templates).difference(constants.DISK_TEMPLATES)
    if wrong:
      raise errors.ConfigurationError("Invalid disk template(s) %s" %
                                      utils.CommaJoin(wrong))

  @classmethod
  def CheckParameter(cls, key, value):
    """Checks a parameter.

    Currently we expect all parameters to be float values.

    """
    try:
      value = float(value)
    except (TypeError, ValueError), err:
      raise errors.ConfigurationError("Invalid value for key" " '%s':"
                                      " '%s', error: %s" % (key, value, err))


class Instance(TaggableObject):
  """Config object representing an instance."""
  __slots__ = [
    "name",
    "primary_node",
    "os",
    "hypervisor",
    "hvparams",
    "beparams",
    "osparams",
    "admin_state",
    "nics",
    "disks",
    "disk_template",
    "disks_active",
    "network_port",
    "serial_no",
    ] + _TIMESTAMPS + _UUID

  def _ComputeSecondaryNodes(self):
    """Compute the list of secondary nodes.

    This is a simple wrapper over _ComputeAllNodes.

    """
    all_nodes = set(self._ComputeAllNodes())
    all_nodes.discard(self.primary_node)
    return tuple(all_nodes)

  secondary_nodes = property(_ComputeSecondaryNodes, None, None,
                             "List of names of secondary nodes")

  def _ComputeAllNodes(self):
    """Compute the list of all nodes.

    Since the data is already there (in the drbd disks), keeping it as
    a separate normal attribute is redundant and if not properly
    synchronised can cause problems. Thus it's better to compute it
    dynamically.

    """
    def _Helper(nodes, device):
      """Recursively computes nodes given a top device."""
      if device.dev_type in constants.LDS_DRBD:
        nodea, nodeb = device.logical_id[:2]
        nodes.add(nodea)
        nodes.add(nodeb)

      if device.children:
        for child in device.children:
          _Helper(nodes, child)

    all_nodes = set()
    all_nodes.add(self.primary_node)
    for device in self.disks:
      _Helper(all_nodes, device)
    return tuple(all_nodes)

  all_nodes = property(_ComputeAllNodes, None, None,
                       "List of names of all the nodes of the instance")

  def MapLVsByNode(self, lvmap=None, devs=None, node=None):
    """Provide a mapping of nodes to LVs this instance owns.

    This function figures out what logical volumes should belong on
    which nodes, recursing through a device tree.

    @param lvmap: optional dictionary to receive the
      'node' : ['lv', ...] data.

    @return: None if lvmap arg is given, otherwise, a dictionary of
      the form { 'nodename' : ['volume1', 'volume2', ...], ... };
      volumeN is of the form "vg_name/lv_name", compatible with
      GetVolumeList()

    """
    if node is None:
      node = self.primary_node

    if lvmap is None:
      lvmap = {
        node: [],
        }
      ret = lvmap
    else:
      if not node in lvmap:
        lvmap[node] = []
      ret = None

    if not devs:
      devs = self.disks

    for dev in devs:
      if dev.dev_type == constants.LD_LV:
        lvmap[node].append(dev.logical_id[0] + "/" + dev.logical_id[1])

      elif dev.dev_type in constants.LDS_DRBD:
        if dev.children:
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])

      elif dev.children:
        self.MapLVsByNode(lvmap, dev.children, node)

    return ret

, idx
):
1156 """Find a disk given having a specified index.
1158 This is just a wrapper that does validation of the index.
1161 @param idx: the disk index
1163 @return: the corresponding disk
1164 @raise errors.OpPrereqError: when the given index is not valid
1169 return self
.disks
[idx
]
1170 except (TypeError, ValueError), err
:
1171 raise errors
.OpPrereqError("Invalid disk index: '%s'" % str(err
),
1174 raise errors
.OpPrereqError("Invalid disk index: %d (instace has disks"
1175 " 0 to %d" % (idx
, len(self
.disks
) - 1),
  def ToDict(self):
    """Instance-specific conversion to standard python types.

    This replaces the children lists of objects with lists of standard
    python types.

    """
    bo = super(Instance, self).ToDict()

    for attr in "nics", "disks":
      alist = bo.get(attr, None)
      if alist:
        nlist = outils.ContainerToDicts(alist)
      else:
        nlist = []
      bo[attr] = nlist
    return bo

, val
):
1198 """Custom function for instances.
1201 if "admin_state" not in val
:
1202 if val
.get("admin_up", False):
1203 val
["admin_state"] = constants
.ADMINST_UP
1205 val
["admin_state"] = constants
.ADMINST_DOWN
1206 if "admin_up" in val
:
1208 obj
= super(Instance
, cls
).FromDict(val
)
1209 obj
.nics
= outils
.ContainerFromDicts(obj
.nics
, list, NIC
)
1210 obj
.disks
= outils
.ContainerFromDicts(obj
.disks
, list, Disk
)
  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    for nic in self.nics:
      nic.UpgradeConfig()
    for disk in self.disks:
      disk.UpgradeConfig()
    if self.hvparams:
      for key in constants.HVC_GLOBALS:
        try:
          del self.hvparams[key]
        except KeyError:
          pass
    if self.osparams is None:
      self.osparams = {}
    UpgradeBeParams(self.beparams)
    if self.disks_active is None:
      self.disks_active = self.admin_state == constants.ADMINST_UP


class OS(ConfigObject):
  """Config object representing an operating system.

  @type supported_parameters: list
  @ivar supported_parameters: a list of tuples, name and description,
    containing the supported parameters by this OS

  @type VARIANT_DELIM: string
  @cvar VARIANT_DELIM: the variant delimiter

  """
  __slots__ = [
    "name",
    "path",
    "api_versions",
    "create_script",
    "export_script",
    "import_script",
    "rename_script",
    "verify_script",
    "supported_variants",
    "supported_parameters",
    ]

  VARIANT_DELIM = "+"

  @classmethod
  def SplitNameVariant(cls, name):
    """Splits the name into the proper name and variant.

    @param name: the OS (unprocessed) name
    @rtype: list
    @return: a list of two elements; if the original name didn't
      contain a variant, it's returned as an empty string

    """
    nv = name.split(cls.VARIANT_DELIM, 1)
    if len(nv) == 1:
      nv.append("")
    return nv

  @classmethod
  def GetName(cls, name):
    """Returns the proper name of the os (without the variant).

    @param name: the OS (unprocessed) name

    """
    return cls.SplitNameVariant(name)[0]

  @classmethod
  def GetVariant(cls, name):
    """Returns the variant of the os (without the base name).

    @param name: the OS (unprocessed) name

    """
    return cls.SplitNameVariant(name)[1]


class ExtStorage(ConfigObject):
  """Config object representing an External Storage Provider.

  """
  __slots__ = [
    "name",
    "path",
    "create_script",
    "remove_script",
    "grow_script",
    "attach_script",
    "detach_script",
    "setinfo_script",
    "verify_script",
    "supported_parameters",
    ]


class NodeHvState(ConfigObject):
  """Hypervisor state on a node.

  @ivar mem_total: Total amount of memory
  @ivar mem_node: Memory used by, or reserved for, the node itself (not always
    available)
  @ivar mem_hv: Memory used by hypervisor or lost due to instance allocation
    rounding
  @ivar mem_inst: Memory used by instances living on node
  @ivar cpu_total: Total node CPU core count
  @ivar cpu_node: Number of CPU cores reserved for the node itself

  """
  __slots__ = [
    "mem_total",
    "mem_node",
    "mem_hv",
    "mem_inst",
    "cpu_total",
    "cpu_node",
    ]


class NodeDiskState(ConfigObject):
  """Disk state on a node.

  """


class Node(TaggableObject):
  """Config object representing a node.

  @ivar hv_state: Hypervisor state (e.g. number of CPUs)
  @ivar hv_state_static: Hypervisor state overridden by user
  @ivar disk_state: Disk state (e.g. free space)
  @ivar disk_state_static: Disk state overridden by user

  """
  __slots__ = [
    "name",
    "primary_ip",
    "secondary_ip",
    "serial_no",
    "master_candidate",
    "offline",
    "drained",
    "group",
    "master_capable",
    "vm_capable",
    "ndparams",
    "powered",
    "hv_state",
    "hv_state_static",
    "disk_state",
    "disk_state_static",
    ] + _TIMESTAMPS + _UUID

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    # pylint: disable=E0203
    # because these are "defined" via slots, not manually
    if self.master_capable is None:
      self.master_capable = True

    if self.vm_capable is None:
      self.vm_capable = True

    if self.ndparams is None:
      self.ndparams = {}
    # And remove any global parameter
    for key in constants.NDC_GLOBALS:
      if key in self.ndparams:
        logging.warning("Ignoring %s node parameter for node %s",
                        key, self.name)
        del self.ndparams[key]

    if self.powered is None:
      self.powered = True

  def ToDict(self):
    """Custom function for serializing.

    """
    data = super(Node, self).ToDict()

    hv_state = data.get("hv_state", None)
    if hv_state is not None:
      data["hv_state"] = outils.ContainerToDicts(hv_state)

    disk_state = data.get("disk_state", None)
    if disk_state is not None:
      data["disk_state"] = \
        dict((key, outils.ContainerToDicts(value))
             for (key, value) in disk_state.items())

    return data

  @classmethod
  def FromDict(cls, val):
    """Custom function for deserializing.

    """
    obj = super(Node, cls).FromDict(val)

    if obj.hv_state is not None:
      obj.hv_state = \
        outils.ContainerFromDicts(obj.hv_state, dict, NodeHvState)

    if obj.disk_state is not None:
      obj.disk_state = \
        dict((key, outils.ContainerFromDicts(value, dict, NodeDiskState))
             for (key, value) in obj.disk_state.items())

    return obj


class NodeGroup(TaggableObject):
  """Config object representing a node group."""
  __slots__ = [
    "name",
    "members",
    "ndparams",
    "serial_no",
    "alloc_policy",
    "hv_state_static",
    "disk_state_static",
    "diskparams",
    "ipolicy",
    "networks",
    ] + _TIMESTAMPS + _UUID

  def ToDict(self):
    """Custom function for nodegroup.

    This discards the members object, which gets recalculated and is only kept
    in memory.

    """
    mydict = super(NodeGroup, self).ToDict()
    del mydict["members"]
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for nodegroup.

    The members slot is initialized to an empty list, upon deserialization.

    """
    obj = super(NodeGroup, cls).FromDict(val)
    obj.members = []
    return obj

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    if self.ndparams is None:
      self.ndparams = {}

    if self.serial_no is None:
      self.serial_no = 1

    if self.alloc_policy is None:
      self.alloc_policy = constants.ALLOC_POLICY_PREFERRED

    # We only update mtime, and not ctime, since we would not be able
    # to provide a correct value for creation time.
    if self.mtime is None:
      self.mtime = time.time()

    if self.diskparams is None:
      self.diskparams = {}
    if self.ipolicy is None:
      self.ipolicy = MakeEmptyIPolicy()

    if self.networks is None:
      self.networks = {}

  def FillND(self, node):
    """Return filled out ndparams for L{objects.Node}

    @type node: L{objects.Node}
    @param node: A Node object to fill
    @return: a copy of the node's ndparams with defaults filled
      from the node group defaults

    """
    return self.SimpleFillND(node.ndparams)

  def SimpleFillND(self, ndparams):
    """Fill a given ndparams dict with defaults.

    @type ndparams: dict
    @param ndparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in ndparams with missing keys filled
      from the node group defaults

    """
    return FillDict(self.ndparams, ndparams)


class Cluster(TaggableObject):
  """Config object representing the cluster."""
  __slots__ = [
    "serial_no",
    "rsahostkeypub",
    "highest_used_port",
    "tcpudp_port_pool",
    "mac_prefix",
    "volume_group_name",
    "reserved_lvs",
    "drbd_usermode_helper",
    "default_bridge",
    "default_hypervisor",
    "master_node",
    "master_ip",
    "master_netdev",
    "master_netmask",
    "use_external_mip_script",
    "cluster_name",
    "file_storage_dir",
    "shared_file_storage_dir",
    "enabled_hypervisors",
    "hvparams",
    "ipolicy",
    "os_hvp",
    "beparams",
    "osparams",
    "nicparams",
    "ndparams",
    "diskparams",
    "candidate_pool_size",
    "modify_etc_hosts",
    "modify_ssh_setup",
    "maintain_node_health",
    "uid_pool",
    "default_iallocator",
    "hidden_os",
    "blacklisted_os",
    "primary_ip_family",
    "prealloc_wipe_disks",
    "hv_state_static",
    "disk_state_static",
    "enabled_disk_templates",
    ] + _TIMESTAMPS + _UUID

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    # pylint: disable=E0203
    # because these are "defined" via slots, not manually
    if self.hvparams is None:
      self.hvparams = constants.HVC_DEFAULTS
    else:
      for hypervisor in self.hvparams:
        self.hvparams[hypervisor] = FillDict(
          constants.HVC_DEFAULTS[hypervisor], self.hvparams[hypervisor])

    if self.os_hvp is None:
      self.os_hvp = {}

    # osparams added before 2.2
    if self.osparams is None:
      self.osparams = {}

    self.ndparams = UpgradeNDParams(self.ndparams)

    self.beparams = UpgradeGroupedParams(self.beparams,
                                         constants.BEC_DEFAULTS)
    for beparams_group in self.beparams:
      UpgradeBeParams(self.beparams[beparams_group])

    migrate_default_bridge = not self.nicparams
    self.nicparams = UpgradeGroupedParams(self.nicparams,
                                          constants.NICC_DEFAULTS)
    if migrate_default_bridge:
      self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
        self.default_bridge

    if self.modify_etc_hosts is None:
      self.modify_etc_hosts = True

    if self.modify_ssh_setup is None:
      self.modify_ssh_setup = True

    # default_bridge is no longer used in 2.1. The slot is left there to
    # support auto-upgrading. It can be removed once we decide to deprecate
    # upgrading straight from 2.0.
    if self.default_bridge is not None:
      self.default_bridge = None

    # default_hypervisor is just the first enabled one in 2.1. This slot and
    # code can be removed once upgrading straight from 2.0 is deprecated.
    if self.default_hypervisor is not None:
      self.enabled_hypervisors = ([self.default_hypervisor] +
                                  [hvname for hvname in self.enabled_hypervisors
                                   if hvname != self.default_hypervisor])
      self.default_hypervisor = None

    # maintain_node_health added after 2.1.1
    if self.maintain_node_health is None:
      self.maintain_node_health = False

    if self.uid_pool is None:
      self.uid_pool = []

    if self.default_iallocator is None:
      self.default_iallocator = ""

    # reserved_lvs added before 2.2
    if self.reserved_lvs is None:
      self.reserved_lvs = []

    # hidden and blacklisted operating systems added before 2.2.1
    if self.hidden_os is None:
      self.hidden_os = []

    if self.blacklisted_os is None:
      self.blacklisted_os = []

    # primary_ip_family added before 2.3
    if self.primary_ip_family is None:
      self.primary_ip_family = AF_INET

    if self.master_netmask is None:
      ipcls = netutils.IPAddress.GetClassFromIpFamily(self.primary_ip_family)
      self.master_netmask = ipcls.iplen

    if self.prealloc_wipe_disks is None:
      self.prealloc_wipe_disks = False

    # shared_file_storage_dir added before 2.5
    if self.shared_file_storage_dir is None:
      self.shared_file_storage_dir = ""

    if self.use_external_mip_script is None:
      self.use_external_mip_script = False

    if self.diskparams:
      self.diskparams = UpgradeDiskParams(self.diskparams)
    else:
      self.diskparams = constants.DISK_DT_DEFAULTS.copy()

    # instance policy added before 2.6
    if self.ipolicy is None:
      self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, {})
    else:
      # we can either make sure to upgrade the ipolicy always, or only
      # do it in some corner cases (e.g. missing keys); note that this
      # will break any removal of keys from the ipolicy dict
      wrongkeys = frozenset(self.ipolicy.keys()) - constants.IPOLICY_ALL_KEYS
      if wrongkeys:
        # These keys would be silently removed by FillIPolicy()
        msg = ("Cluster instance policy contains spurious keys: %s" %
               utils.CommaJoin(wrongkeys))
        raise errors.ConfigurationError(msg)
      self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, self.ipolicy)

  @property
  def primary_hypervisor(self):
    """The first hypervisor is the primary.

    Useful, for example, for L{Node}'s hv/disk state.

    """
    return self.enabled_hypervisors[0]

  def ToDict(self):
    """Custom function for cluster.

    """
    mydict = super(Cluster, self).ToDict()

    if self.tcpudp_port_pool is None:
      tcpudp_port_pool = []
    else:
      tcpudp_port_pool = list(self.tcpudp_port_pool)

    mydict["tcpudp_port_pool"] = tcpudp_port_pool
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for cluster.

    """
    obj = super(Cluster, cls).FromDict(val)

    if obj.tcpudp_port_pool is None:
      obj.tcpudp_port_pool = set()
    elif not isinstance(obj.tcpudp_port_pool, set):
      obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)

    return obj

  def SimpleFillDP(self, diskparams):
    """Fill a given diskparams dict with cluster defaults.

    @param diskparams: The diskparams
    @return: The defaults dict

    """
    return FillDiskParams(self.diskparams, diskparams)

  def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
    """Get the default hypervisor parameters for the cluster.

    @param hypervisor: the hypervisor name
    @param os_name: if specified, we'll also update the defaults for this OS
    @param skip_keys: if passed, list of keys not to use
    @return: the defaults dict

    """
    if skip_keys is None:
      skip_keys = []

    fill_stack = [self.hvparams.get(hypervisor, {})]
    if os_name is not None:
      os_hvp = self.os_hvp.get(os_name, {}).get(hypervisor, {})
      fill_stack.append(os_hvp)

    ret_dict = {}
    for o_dict in fill_stack:
      ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys)

    return ret_dict

  def SimpleFillHV(self, hv_name, os_name, hvparams, skip_globals=False):
    """Fill a given hvparams dict with cluster defaults.

    @type hv_name: string
    @param hv_name: the hypervisor to use
    @type os_name: string
    @param os_name: the OS to use for overriding the hypervisor defaults
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
      not be filled
    @rtype: dict
    @return: a copy of the given hvparams with missing keys filled from
      the cluster defaults

    """
    if skip_globals:
      skip_keys = constants.HVC_GLOBALS
    else:
      skip_keys = []

    def_dict = self.GetHVDefaults(hv_name, os_name, skip_keys=skip_keys)
    return FillDict(def_dict, hvparams, skip_keys=skip_keys)


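  # Example (illustrative; the OS name and parameter value are made up):
  #
  #   >>> filled = cluster.SimpleFillHV(constants.HT_KVM, "debian-image",
  #   ...                               {"kernel_path": "/boot/vmlinuz"})
  #
  # The result is the cluster's KVM defaults, overlaid with any per-OS
  # override from os_hvp, overlaid with the explicit hvparams passed in;
  # with skip_globals=True the keys in constants.HVC_GLOBALS are left out.
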
  def FillHV(self, instance, skip_globals=False):
    """Fill an instance's hvparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
      not be filled
    @rtype: dict
    @return: a copy of the instance's hvparams with missing keys filled from
      the cluster defaults

    """
    return self.SimpleFillHV(instance.hypervisor, instance.os,
                             instance.hvparams, skip_globals)

  def SimpleFillBE(self, beparams):
    """Fill a given beparams dict with cluster defaults.

    @type beparams: dict
    @param beparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in beparams with missing keys filled
      from the cluster defaults

    """
    return FillDict(self.beparams.get(constants.PP_DEFAULT, {}), beparams)

  def FillBE(self, instance):
    """Fill an instance's beparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @rtype: dict
    @return: a copy of the instance's beparams with missing keys filled from
      the cluster defaults

    """
    return self.SimpleFillBE(instance.beparams)

  def SimpleFillNIC(self, nicparams):
    """Fill a given nicparams dict with cluster defaults.

    @type nicparams: dict
    @param nicparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in nicparams with missing keys filled
      from the cluster defaults

    """
    return FillDict(self.nicparams.get(constants.PP_DEFAULT, {}), nicparams)

  def SimpleFillOS(self, os_name, os_params):
    """Fill an instance's osparams dict with cluster defaults.

    @type os_name: string
    @param os_name: the OS name to use
    @type os_params: dict
    @param os_params: the dict to fill with default values
    @rtype: dict
    @return: a copy of the instance's osparams with missing keys filled from
      the cluster defaults

    """
    name_only = os_name.split("+", 1)[0]
    result = self.osparams.get(name_only, {})
    result = FillDict(result, self.osparams.get(os_name, {}))
    return FillDict(result, os_params)

  @staticmethod
  def SimpleFillHvState(hv_state):
    """Fill an hv_state sub dict with cluster defaults.

    """
    return FillDict(constants.HVST_DEFAULTS, hv_state)

  @staticmethod
  def SimpleFillDiskState(disk_state):
    """Fill a disk_state sub dict with cluster defaults.

    """
    return FillDict(constants.DS_DEFAULTS, disk_state)

  def FillND(self, node, nodegroup):
    """Return filled out ndparams for L{objects.NodeGroup} and L{objects.Node}

    @type node: L{objects.Node}
    @param node: A Node object to fill
    @type nodegroup: L{objects.NodeGroup}
    @param nodegroup: A NodeGroup object to fill
    @return: a copy of the node's ndparams with defaults filled

    """
    return self.SimpleFillND(nodegroup.FillND(node))

  def SimpleFillND(self, ndparams):
    """Fill a given ndparams dict with defaults.

    @type ndparams: dict
    @param ndparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in ndparams with missing keys filled
      from the cluster defaults

    """
    return FillDict(self.ndparams, ndparams)

  def SimpleFillIPolicy(self, ipolicy):
    """ Fill instance policy dict with defaults.

    @param ipolicy: the dict to fill
    @rtype: dict
    @return: a copy of passed ipolicy with missing keys filled from
      the cluster defaults

    """
    return FillIPolicy(self.ipolicy, ipolicy)


class BlockDevStatus(ConfigObject):
  """Config object representing the status of a block device."""


class ImportExportStatus(ConfigObject):
  """Config object representing the status of an import or export."""
  __slots__ = [
    "recent_output",
    "listen_port",
    "connected",
    "progress_mbytes",
    "progress_throughput",
    "progress_eta",
    "progress_percent",
    "exit_status",
    "error_message",
    ] + _TIMESTAMPS


class ImportExportOptions(ConfigObject):
  """Options for import/export daemon

  @ivar key_name: X509 key name (None for cluster certificate)
  @ivar ca_pem: Remote peer CA in PEM format (None for cluster certificate)
  @ivar compress: Compression method (one of L{constants.IEC_ALL})
  @ivar magic: Used to ensure the connection goes to the right disk
  @ivar ipv6: Whether to use IPv6
  @ivar connect_timeout: Number of seconds for establishing connection

  """
  __slots__ = [
    "key_name",
    "ca_pem",
    "compress",
    "magic",
    "ipv6",
    "connect_timeout",
    ]


class ConfdRequest(ConfigObject):
  """Object holding a confd request.

  @ivar protocol: confd protocol version
  @ivar type: confd query type
  @ivar query: query request
  @ivar rsalt: requested reply salt

  """
  __slots__ = [
    "protocol",
    "type",
    "query",
    "rsalt",
    ]


class ConfdReply(ConfigObject):
  """Object holding a confd reply.

  @ivar protocol: confd protocol version
  @ivar status: reply status code (ok, error)
  @ivar answer: confd query reply
  @ivar serial: configuration serial number

  """
  __slots__ = [
    "protocol",
    "status",
    "answer",
    "serial",
    ]


class QueryFieldDefinition(ConfigObject):
  """Object holding a query field definition.

  @ivar name: Field name
  @ivar title: Human-readable title
  @ivar kind: Field type
  @ivar doc: Human-readable description

  """
  __slots__ = [
    "name",
    "title",
    "kind",
    "doc",
    ]


class _QueryResponseBase(ConfigObject):
  __slots__ = [
    "fields",
    ]

  def ToDict(self):
    """Custom function for serializing.

    """
    mydict = super(_QueryResponseBase, self).ToDict()
    mydict["fields"] = outils.ContainerToDicts(mydict["fields"])
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for de-serializing.

    """
    obj = super(_QueryResponseBase, cls).FromDict(val)
    obj.fields = \
      outils.ContainerFromDicts(obj.fields, list, QueryFieldDefinition)
    return obj


class QueryResponse(_QueryResponseBase):
  """Object holding the response to a query.

  @ivar fields: List of L{QueryFieldDefinition} objects
  @ivar data: Requested data

  """
  __slots__ = [
    "data",
    ]


class QueryFieldsRequest(ConfigObject):
  """Object holding a request for querying available fields.

  """


class QueryFieldsResponse(_QueryResponseBase):
  """Object holding the response to a query for fields.

  @ivar fields: List of L{QueryFieldDefinition} objects

  """


class MigrationStatus(ConfigObject):
  """Object holding the status of a migration.

  """


class InstanceConsole(ConfigObject):
  """Object describing how to access the console of an instance.

  """
  __slots__ = [
    "instance",
    "kind",
    "message",
    "host",
    "port",
    "user",
    "command",
    "display",
    ]

  def Validate(self):
    """Validates contents of this object.

    """
    assert self.kind in constants.CONS_ALL, "Unknown console type"
    assert self.instance, "Missing instance name"
    assert self.message or self.kind in [constants.CONS_SSH,
                                         constants.CONS_SPICE,
                                         constants.CONS_VNC]
    assert self.host or self.kind == constants.CONS_MESSAGE
    assert self.port or self.kind in [constants.CONS_MESSAGE,
                                      constants.CONS_SSH]
    assert self.user or self.kind in [constants.CONS_MESSAGE,
                                      constants.CONS_SPICE,
                                      constants.CONS_VNC]
    assert self.command or self.kind in [constants.CONS_MESSAGE,
                                         constants.CONS_SPICE,
                                         constants.CONS_VNC]
    assert self.display or self.kind in [constants.CONS_MESSAGE,
                                         constants.CONS_SPICE,
                                         constants.CONS_SSH]
    return True


class Network(TaggableObject):
  """Object representing a network definition for ganeti.

  """
  __slots__ = [
    "name",
    "mac_prefix",
    "network",
    "network6",
    "gateway",
    "gateway6",
    "reservations",
    "ext_reservations",
    "serial_no",
    ] + _TIMESTAMPS + _UUID

  def HooksDict(self, prefix=""):
    """Export a dictionary used by hooks with a network's information.

    @type prefix: String
    @param prefix: Prefix to prepend to the dict entries

    """
    result = {
      "%sNETWORK_NAME" % prefix: self.name,
      "%sNETWORK_UUID" % prefix: self.uuid,
      "%sNETWORK_TAGS" % prefix: " ".join(self.GetTags()),
      }
    if self.network:
      result["%sNETWORK_SUBNET" % prefix] = self.network
    if self.gateway:
      result["%sNETWORK_GATEWAY" % prefix] = self.gateway
    if self.network6:
      result["%sNETWORK_SUBNET6" % prefix] = self.network6
    if self.gateway6:
      result["%sNETWORK_GATEWAY6" % prefix] = self.gateway6
    if self.mac_prefix:
      result["%sNETWORK_MAC_PREFIX" % prefix] = self.mac_prefix

    return result

  @classmethod
  def FromDict(cls, val):
    """Custom function for networks.

    Remove deprecated network_type and family.

    """
    if "network_type" in val:
      del val["network_type"]
    if "family" in val:
      del val["family"]
    obj = super(Network, cls).FromDict(val)
    return obj


class SerializableConfigParser(ConfigParser.SafeConfigParser):
  """Simple wrapper over ConfigParser that allows serialization.

  This class is basically ConfigParser.SafeConfigParser with two
  additional methods that allow it to serialize/unserialize to/from a
  buffer.

  """
  def Dumps(self):
    """Dump this instance and return the string representation."""
    buf = StringIO()
    self.write(buf)
    return buf.getvalue()

  @classmethod
  def Loads(cls, data):
    """Load data from a string."""
    buf = StringIO(data)
    cfp = cls()
    cfp.readfp(buf)
    return cfp


class LvmPvInfo(ConfigObject):
  """Information about an LVM physical volume (PV).

  @type name: string
  @ivar name: name of the PV
  @type vg_name: string
  @ivar vg_name: name of the volume group containing the PV
  @type size: float
  @ivar size: size of the PV in MiB
  @type free: float
  @ivar free: free space in the PV, in MiB
  @type attributes: string
  @ivar attributes: PV attributes
  @type lv_list: list of strings
  @ivar lv_list: names of the LVs hosted on the PV

  """
  __slots__ = [
    "name",
    "vg_name",
    "size",
    "free",
    "attributes",
    "lv_list",
    ]

  def IsEmpty(self):
    """Is this PV empty?

    """
    return self.size <= (self.free + 1)

  def IsAllocatable(self):
    """Is this PV allocatable?

    """
    return ("a" in self.attributes)

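

# Example (illustrative; the device name and sizes are made up):
#
#   >>> pv = LvmPvInfo(name="/dev/sda3", vg_name="xenvg", size=10240.0,
#   ...                free=10239.5, attributes="a--", lv_list=[])
#   >>> pv.IsEmpty()         # free space within 1 MiB of the total size
#   True
#   >>> pv.IsAllocatable()   # the "a" attribute flags an allocatable PV
#   True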