Merge branch 'stable-2.12' into stable-2.13
[ganeti-github.git] / lib / objects.py
1 #
2 #
3
4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc.
5 # All rights reserved.
6 #
7 # Redistribution and use in source and binary forms, with or without
8 # modification, are permitted provided that the following conditions are
9 # met:
10 #
11 # 1. Redistributions of source code must retain the above copyright notice,
12 # this list of conditions and the following disclaimer.
13 #
14 # 2. Redistributions in binary form must reproduce the above copyright
15 # notice, this list of conditions and the following disclaimer in the
16 # documentation and/or other materials provided with the distribution.
17 #
18 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
19 # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
20 # TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
21 # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
22 # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
23 # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
24 # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
25 # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
26 # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
27 # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28 # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
30
31 """Transportable objects for Ganeti.
32
33 This module provides small, mostly data-only objects which are safe to
34 pass to and from external parties.
35
36 """
37
38 # pylint: disable=E0203,W0201,R0902
39
40 # E0203: Access to member %r before its definition, since we use
41 # objects.py which doesn't explicitly initialise its members
42
43 # W0201: Attribute '%s' defined outside __init__
44
45 # R0902: Allow instances of these objects to have more than 20 attributes
46
47 import ConfigParser
48 import re
49 import copy
50 import logging
51 import time
52 from cStringIO import StringIO
53
54 from ganeti import errors
55 from ganeti import constants
56 from ganeti import netutils
57 from ganeti import outils
58 from ganeti import utils
59 from ganeti import serializer
60
61 from socket import AF_INET
62
63
64 __all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
65 "OS", "Node", "NodeGroup", "Cluster", "FillDict", "Network",
66 "Filter"]
67
68 _TIMESTAMPS = ["ctime", "mtime"]
69 _UUID = ["uuid"]
70
71
72 def FillDict(defaults_dict, custom_dict, skip_keys=None):
73 """Basic function to apply settings on top a default dict.
74
75 @type defaults_dict: dict
76 @param defaults_dict: dictionary holding the default values
77 @type custom_dict: dict
78 @param custom_dict: dictionary holding customized values
79 @type skip_keys: list
80 @param skip_keys: which keys not to fill
81 @rtype: dict
82 @return: dict with the 'full' values
83
84 """
85 ret_dict = copy.deepcopy(defaults_dict)
86 ret_dict.update(custom_dict)
87 if skip_keys:
88 for k in skip_keys:
89 if k in ret_dict:
90 del ret_dict[k]
91 return ret_dict
92
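# Illustrative usage sketch (not part of the original module; values are
# made up): defaults are deep-copied, custom values win, and skipped keys
# are dropped from the result.
#   >>> FillDict({"a": 1, "b": 2}, {"b": 5, "c": 7}, skip_keys=["a"]) == \
#   ...     {"b": 5, "c": 7}
#   True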
93
94 def FillIPolicy(default_ipolicy, custom_ipolicy):
95 """Fills an instance policy with defaults.
96
97 """
98 assert frozenset(default_ipolicy.keys()) == constants.IPOLICY_ALL_KEYS
99 ret_dict = copy.deepcopy(custom_ipolicy)
100 for key in default_ipolicy:
101 if key not in ret_dict:
102 ret_dict[key] = copy.deepcopy(default_ipolicy[key])
103 elif key == constants.ISPECS_STD:
104 ret_dict[key] = FillDict(default_ipolicy[key], ret_dict[key])
105 return ret_dict
106
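# Illustrative usage sketch (assumes constants.IPOLICY_DEFAULTS, which
# satisfies the assertion above): keys given in the custom policy win, and
# only ISPECS_STD is merged key by key with the defaults.
#   >>> pol = FillIPolicy(constants.IPOLICY_DEFAULTS,
#   ...                   {constants.IPOLICY_VCPU_RATIO: 3.0})
#   >>> pol[constants.IPOLICY_VCPU_RATIO]
#   3.0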
107
108 def FillDiskParams(default_dparams, custom_dparams, skip_keys=None):
109 """Fills the disk parameter defaults.
110
111 @see: L{FillDict} for parameters and return value
112
113 """
114 assert frozenset(default_dparams.keys()) == constants.DISK_TEMPLATES
115
116 return dict((dt, FillDict(default_dparams[dt], custom_dparams.get(dt, {}),
117 skip_keys=skip_keys))
118 for dt in constants.DISK_TEMPLATES)
119
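# Illustrative usage sketch (assumes constants.DISK_DT_DEFAULTS, whose keys
# cover all disk templates as the assertion requires):
#   >>> full = FillDiskParams(constants.DISK_DT_DEFAULTS,
#   ...                       {constants.DT_DRBD8:
#   ...                        {constants.DRBD_RESYNC_RATE: 4096}})
#   >>> full[constants.DT_DRBD8][constants.DRBD_RESYNC_RATE]
#   4096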
120
121 def UpgradeGroupedParams(target, defaults):
122 """Update all groups for the target parameter.
123
124 @type target: dict of dicts
125 @param target: {group: {parameter: value}}
126 @type defaults: dict
127 @param defaults: default parameter values
128
129 """
130 if target is None:
131 target = {constants.PP_DEFAULT: defaults}
132 else:
133 for group in target:
134 target[group] = FillDict(defaults, target[group])
135 return target
136
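# Illustrative usage sketch (made-up parameter names): a missing target is
# replaced by a single default group, otherwise every group is filled.
#   >>> UpgradeGroupedParams(None, {"x": 1}) == {constants.PP_DEFAULT: {"x": 1}}
#   True
#   >>> UpgradeGroupedParams({"grp": {"x": 2}}, {"x": 1, "y": 0}) == \
#   ...     {"grp": {"x": 2, "y": 0}}
#   True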
137
138 def UpgradeBeParams(target):
139 """Update the be parameters dict to the new format.
140
141 @type target: dict
142 @param target: "be" parameters dict
143
144 """
145 if constants.BE_MEMORY in target:
146 memory = target[constants.BE_MEMORY]
147 target[constants.BE_MAXMEM] = memory
148 target[constants.BE_MINMEM] = memory
149 del target[constants.BE_MEMORY]
150
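# Illustrative usage sketch: the legacy "memory" backend parameter is split
# in place into maxmem/minmem.
#   >>> be = {constants.BE_MEMORY: 128}
#   >>> UpgradeBeParams(be)
#   >>> be == {constants.BE_MAXMEM: 128, constants.BE_MINMEM: 128}
#   True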
151
152 def UpgradeDiskParams(diskparams):
153 """Upgrade the disk parameters.
154
155 @type diskparams: dict
156 @param diskparams: disk parameters to upgrade
157 @rtype: dict
158 @return: the upgraded disk parameters dict
159
160 """
161 if not diskparams:
162 result = {}
163 else:
164 result = FillDiskParams(constants.DISK_DT_DEFAULTS, diskparams)
165
166 return result
167
168
169 def UpgradeNDParams(ndparams):
170 """Upgrade ndparams structure.
171
172 @type ndparams: dict
173 @param ndparams: node parameters to upgrade
174 @rtype: dict
175 @return: the upgraded node parameters dict
176
177 """
178 if ndparams is None:
179 ndparams = {}
180
181 if (constants.ND_OOB_PROGRAM in ndparams and
182 ndparams[constants.ND_OOB_PROGRAM] is None):
183 # will be reset by the line below
184 del ndparams[constants.ND_OOB_PROGRAM]
185 return FillDict(constants.NDC_DEFAULTS, ndparams)
186
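# Illustrative usage sketch: an explicit None for the OOB program is dropped
# so the cluster default is restored, and all other keys are filled from
# constants.NDC_DEFAULTS.
#   >>> nd = UpgradeNDParams({constants.ND_OOB_PROGRAM: None})
#   >>> nd[constants.ND_OOB_PROGRAM] == \
#   ...     constants.NDC_DEFAULTS[constants.ND_OOB_PROGRAM]
#   True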
187
188 def MakeEmptyIPolicy():
189 """Create empty IPolicy dictionary.
190
191 """
192 return {}
193
194
195 class ConfigObject(outils.ValidatedSlots):
196 """A generic config object.
197
198 It has the following properties:
199
200 - provides somewhat safe recursive unpickling and pickling for its classes
201 - unset attributes which are defined in slots are always returned
202 as None instead of raising an error
203
204 Classes derived from this must always declare __slots__ (we use many
205 config objects and the memory reduction is useful)
206
207 """
208 __slots__ = []
209
210 def __getattr__(self, name):
211 if name not in self.GetAllSlots():
212 raise AttributeError("Invalid object attribute %s.%s" %
213 (type(self).__name__, name))
214 return None
215
216 def __setstate__(self, state):
217 slots = self.GetAllSlots()
218 for name in state:
219 if name in slots:
220 setattr(self, name, state[name])
221
222 def Validate(self):
223 """Validates the slots.
224
225 This method returns L{None} if the validation succeeds, or raises
226 an exception otherwise.
227
228 This method must be implemented by the child classes.
229
230 @rtype: NoneType
231 @return: L{None}, if the validation succeeds
232
233 @raise Exception: validation fails
234
235 """
236
237 def ToDict(self, _with_private=False):
238 """Convert to a dict holding only standard python types.
239
240 The generic routine just dumps all of this object's attributes in
241 a dict. It does not work if the class has children who are
242 ConfigObjects themselves (e.g. the nics list in an Instance), in
243 which case the object should subclass the function in order to
244 make sure all objects returned are only standard python types.
245
246 Private fields can be included or not with the _with_private switch.
247 The actual implementation of this switch is left for those subclasses
248 with private fields to implement.
249
250 @type _with_private: bool
251 @param _with_private: if True, the object will leak its private fields in
252 the dictionary representation. If False, the values
253 will be replaced with None.
254
255 """
256 result = {}
257 for name in self.GetAllSlots():
258 value = getattr(self, name, None)
259 if value is not None:
260 result[name] = value
261 return result
262
263 __getstate__ = ToDict
264
265 @classmethod
266 def FromDict(cls, val):
267 """Create an object from a dictionary.
268
269 This generic routine takes a dict, instantiates a new instance of
270 the given class, and sets attributes based on the dict content.
271
272 As for `ToDict`, this does not work if the class has children
273 who are ConfigObjects themselves (e.g. the nics list in an
274 Instance), in which case the object should subclass the function
275 and alter the objects.
276
277 """
278 if not isinstance(val, dict):
279 raise errors.ConfigurationError("Invalid object passed to FromDict:"
280 " expected dict, got %s" % type(val))
281 val_str = dict([(str(k), v) for k, v in val.iteritems()])
282 obj = cls(**val_str) # pylint: disable=W0142
283 return obj
284
285 def Copy(self):
286 """Makes a deep copy of the current object and its children.
287
288 """
289 dict_form = self.ToDict()
290 clone_obj = self.__class__.FromDict(dict_form)
291 return clone_obj
292
293 def __repr__(self):
294 """Implement __repr__ for ConfigObjects."""
295 return repr(self.ToDict())
296
297 def __eq__(self, other):
298 """Implement __eq__ for ConfigObjects."""
299 return isinstance(other, self.__class__) and self.ToDict() == other.ToDict()
300
301 def UpgradeConfig(self):
302 """Fill defaults for missing configuration values.
303
304 This method will be called at configuration load time, and its
305 implementation will be object dependent.
306
307 """
308 pass
309
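# Illustrative round-trip sketch (uses the NIC class defined further below;
# the MAC value is made up). Unset slots read as None instead of raising
# AttributeError, and None-valued attributes are omitted by ToDict().
#   >>> nic = NIC(mac="aa:00:00:00:00:01", ip=None)
#   >>> nic.network is None
#   True
#   >>> NIC.FromDict(nic.ToDict()).mac
#   'aa:00:00:00:00:01'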
310
311 class TaggableObject(ConfigObject):
312 """An generic class supporting tags.
313
314 """
315 __slots__ = ["tags"]
316 VALID_TAG_RE = re.compile(r"^[\w.+*/:@-]+$")
317
318 @classmethod
319 def ValidateTag(cls, tag):
320 """Check if a tag is valid.
321
322 If the tag is invalid, an errors.TagError will be raised. The
323 function has no return value.
324
325 """
326 if not isinstance(tag, basestring):
327 raise errors.TagError("Invalid tag type (not a string)")
328 if len(tag) > constants.MAX_TAG_LEN:
329 raise errors.TagError("Tag too long (>%d characters)" %
330 constants.MAX_TAG_LEN)
331 if not tag:
332 raise errors.TagError("Tags cannot be empty")
333 if not cls.VALID_TAG_RE.match(tag):
334 raise errors.TagError("Tag contains invalid characters")
335
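# Illustrative usage sketch: a valid tag passes silently (returns None), an
# invalid one raises errors.TagError.
#   >>> TaggableObject.ValidateTag("web-frontend")   # OK, no exception
#   >>> TaggableObject.ValidateTag("")               # raises errors.TagError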
336 def GetTags(self):
337 """Return the tags list.
338
339 """
340 tags = getattr(self, "tags", None)
341 if tags is None:
342 tags = self.tags = set()
343 return tags
344
345 def AddTag(self, tag):
346 """Add a new tag.
347
348 """
349 self.ValidateTag(tag)
350 tags = self.GetTags()
351 if len(tags) >= constants.MAX_TAGS_PER_OBJ:
352 raise errors.TagError("Too many tags")
353 self.GetTags().add(tag)
354
355 def RemoveTag(self, tag):
356 """Remove a tag.
357
358 """
359 self.ValidateTag(tag)
360 tags = self.GetTags()
361 try:
362 tags.remove(tag)
363 except KeyError:
364 raise errors.TagError("Tag not found")
365
366 def ToDict(self, _with_private=False):
367 """Taggable-object-specific conversion to standard python types.
368
369 This replaces the tags set with a list.
370
371 """
372 bo = super(TaggableObject, self).ToDict(_with_private=_with_private)
373
374 tags = bo.get("tags", None)
375 if isinstance(tags, set):
376 bo["tags"] = list(tags)
377 return bo
378
379 @classmethod
380 def FromDict(cls, val):
381 """Custom function for instances.
382
383 """
384 obj = super(TaggableObject, cls).FromDict(val)
385 if hasattr(obj, "tags") and isinstance(obj.tags, list):
386 obj.tags = set(obj.tags)
387 return obj
388
389
390 class MasterNetworkParameters(ConfigObject):
391 """Network configuration parameters for the master
392
393 @ivar uuid: master node's UUID
394 @ivar ip: master IP
395 @ivar netmask: master netmask
396 @ivar netdev: master network device
397 @ivar ip_family: master IP family
398
399 """
400 __slots__ = [
401 "uuid",
402 "ip",
403 "netmask",
404 "netdev",
405 "ip_family",
406 ]
407
408
409 class ConfigData(ConfigObject):
410 """Top-level config object."""
411 __slots__ = [
412 "version",
413 "cluster",
414 "nodes",
415 "nodegroups",
416 "instances",
417 "networks",
418 "disks",
419 "filters",
420 "serial_no",
421 ] + _TIMESTAMPS
422
423 def ToDict(self, _with_private=False):
424 """Custom function for top-level config data.
425
426 This just replaces the list of nodes, instances, nodegroups,
427 networks, disks and the cluster with standard python types.
428
429 """
430 mydict = super(ConfigData, self).ToDict(_with_private=_with_private)
431 mydict["cluster"] = mydict["cluster"].ToDict()
432 for key in ("nodes", "instances", "nodegroups", "networks", "disks",
433 "filters"):
434 mydict[key] = outils.ContainerToDicts(mydict[key])
435
436 return mydict
437
438 @classmethod
439 def FromDict(cls, val):
440 """Custom function for top-level config data
441
442 """
443 obj = super(ConfigData, cls).FromDict(val)
444 obj.cluster = Cluster.FromDict(obj.cluster)
445 obj.nodes = outils.ContainerFromDicts(obj.nodes, dict, Node)
446 obj.instances = \
447 outils.ContainerFromDicts(obj.instances, dict, Instance)
448 obj.nodegroups = \
449 outils.ContainerFromDicts(obj.nodegroups, dict, NodeGroup)
450 obj.networks = outils.ContainerFromDicts(obj.networks, dict, Network)
451 obj.disks = outils.ContainerFromDicts(obj.disks, dict, Disk)
452 obj.filters = outils.ContainerFromDicts(obj.filters, dict, Filter)
453 return obj
454
455 def HasAnyDiskOfType(self, dev_type):
456 """Check if in there is at disk of the given type in the configuration.
457
458 @type dev_type: L{constants.DTS_BLOCK}
459 @param dev_type: the type to look for
460 @rtype: boolean
461 @return: boolean indicating if a disk of the given type was found or not
462
463 """
464 for disk in self.disks.values():
465 if disk.IsBasedOnDiskType(dev_type):
466 return True
467 return False
468
469 def UpgradeConfig(self):
470 """Fill defaults for missing configuration values.
471
472 """
473 self.cluster.UpgradeConfig()
474 for node in self.nodes.values():
475 node.UpgradeConfig()
476 for instance in self.instances.values():
477 instance.UpgradeConfig()
478 self._UpgradeEnabledDiskTemplates()
479 if self.nodegroups is None:
480 self.nodegroups = {}
481 for nodegroup in self.nodegroups.values():
482 nodegroup.UpgradeConfig()
483 InstancePolicy.UpgradeDiskTemplates(
484 nodegroup.ipolicy, self.cluster.enabled_disk_templates)
485 if self.cluster.drbd_usermode_helper is None:
486 if self.cluster.IsDiskTemplateEnabled(constants.DT_DRBD8):
487 self.cluster.drbd_usermode_helper = constants.DEFAULT_DRBD_HELPER
488 if self.networks is None:
489 self.networks = {}
490 for network in self.networks.values():
491 network.UpgradeConfig()
492 for disk in self.disks.values():
493 disk.UpgradeConfig()
494 if self.filters is None:
495 self.filters = {}
496
497 def _UpgradeEnabledDiskTemplates(self):
498 """Upgrade the cluster's enabled disk templates by inspecting the currently
499 enabled and/or used disk templates.
500
501 """
502 if not self.cluster.enabled_disk_templates:
503 template_set = \
504 set([inst.disk_template for inst in self.instances.values()])
505 # Add drbd and plain, if lvm is enabled (by specifying a volume group)
506 if self.cluster.volume_group_name:
507 template_set.add(constants.DT_DRBD8)
508 template_set.add(constants.DT_PLAIN)
509 # Set enabled_disk_templates to the inferred disk templates. Order them
510 # according to a preference list that is based on Ganeti's history of
511 # supported disk templates.
512 self.cluster.enabled_disk_templates = []
513 for preferred_template in constants.DISK_TEMPLATE_PREFERENCE:
514 if preferred_template in template_set:
515 self.cluster.enabled_disk_templates.append(preferred_template)
516 template_set.remove(preferred_template)
517 self.cluster.enabled_disk_templates.extend(list(template_set))
518 InstancePolicy.UpgradeDiskTemplates(
519 self.cluster.ipolicy, self.cluster.enabled_disk_templates)
520
521
522 class NIC(ConfigObject):
523 """Config object representing a network card."""
524 __slots__ = ["name", "mac", "ip", "network",
525 "nicparams", "netinfo", "pci"] + _UUID
526
527 @classmethod
528 def CheckParameterSyntax(cls, nicparams):
529 """Check the given parameters for validity.
530
531 @type nicparams: dict
532 @param nicparams: dictionary with parameter names/value
533 @raise errors.ConfigurationError: when a parameter is not valid
534
535 """
536 mode = nicparams[constants.NIC_MODE]
537 if (mode not in constants.NIC_VALID_MODES and
538 mode != constants.VALUE_AUTO):
539 raise errors.ConfigurationError("Invalid NIC mode '%s'" % mode)
540
541 if (mode == constants.NIC_MODE_BRIDGED and
542 not nicparams[constants.NIC_LINK]):
543 raise errors.ConfigurationError("Missing bridged NIC link")
544
545
546 class Filter(ConfigObject):
547 """Config object representing a filter rule."""
548 __slots__ = ["watermark", "priority",
549 "predicates", "action", "reason_trail"] + _UUID
550
551
552 class Disk(ConfigObject):
553 """Config object representing a block device."""
554 __slots__ = [
555 "name",
556 "dev_type",
557 "logical_id",
558 "children",
559 "iv_name",
560 "size",
561 "mode",
562 "params",
563 "spindles",
564 "pci",
565 "serial_no",
566 # dynamic_params is special. It depends on the node this instance
567 # is sent to, and should not be persisted.
568 "dynamic_params"
569 ] + _UUID + _TIMESTAMPS
570
571 def _ComputeAllNodes(self):
572 """Compute the list of all nodes covered by a device and its children."""
573 def _Helper(nodes, device):
574 """Recursively compute nodes given a top device."""
575 if device.dev_type in constants.DTS_DRBD:
576 nodes.extend(device.logical_id[:2])
577 if device.children:
578 for child in device.children:
579 _Helper(nodes, child)
580
581 all_nodes = list()
582 _Helper(all_nodes, self)
583 return tuple(set(all_nodes))
584
585 all_nodes = property(_ComputeAllNodes, None, None,
586 "List of names of all the nodes of a disk")
587
588 def CreateOnSecondary(self):
589 """Test if this device needs to be created on a secondary node."""
590 return self.dev_type in (constants.DT_DRBD8, constants.DT_PLAIN)
591
592 def AssembleOnSecondary(self):
593 """Test if this device needs to be assembled on a secondary node."""
594 return self.dev_type in (constants.DT_DRBD8, constants.DT_PLAIN)
595
596 def OpenOnSecondary(self):
597 """Test if this device needs to be opened on a secondary node."""
598 return self.dev_type in (constants.DT_PLAIN,)
599
600 def StaticDevPath(self):
601 """Return the device path if this device type has a static one.
602
603 Some devices (LVM for example) always live at the same /dev/ path,
604 irrespective of their status. For such devices, we return this
605 path, for others we return None.
606
607 @warning: The path returned is not a normalized pathname; callers
608 should check that it is a valid path.
609
610 """
611 if self.dev_type == constants.DT_PLAIN:
612 return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
613 elif self.dev_type == constants.DT_BLOCK:
614 return self.logical_id[1]
615 elif self.dev_type == constants.DT_RBD:
616 return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
617 return None
618
619 def ChildrenNeeded(self):
620 """Compute the needed number of children for activation.
621
622 This method will return either -1 (all children) or a positive
623 number denoting the minimum number of children needed for
624 activation (only mirrored devices will usually return >=0).
625
626 Currently, only DRBD8 supports diskless activation (therefore we
627 return 0), for all other we keep the previous semantics and return
628 -1.
629
630 """
631 if self.dev_type == constants.DT_DRBD8:
632 return 0
633 return -1
634
635 def IsBasedOnDiskType(self, dev_type):
636 """Check if the disk or its children are based on the given type.
637
638 @type dev_type: L{constants.DTS_BLOCK}
639 @param dev_type: the type to look for
640 @rtype: boolean
641 @return: boolean indicating if a device of the given type was found or not
642
643 """
644 if self.children:
645 for child in self.children:
646 if child.IsBasedOnDiskType(dev_type):
647 return True
648 return self.dev_type == dev_type
649
650 def GetNodes(self, node_uuid):
651 """This function returns the nodes this device lives on.
652
653 Given the node on which the parent of the device lives (or, in
654 case of a top-level device, the primary node of the device's
655 instance), this function will return a list of nodes on which this
656 device needs to (or can) be assembled.
657
658 """
659 if self.dev_type in [constants.DT_PLAIN, constants.DT_FILE,
660 constants.DT_BLOCK, constants.DT_RBD,
661 constants.DT_EXT, constants.DT_SHARED_FILE,
662 constants.DT_GLUSTER]:
663 result = [node_uuid]
664 elif self.dev_type in constants.DTS_DRBD:
665 result = [self.logical_id[0], self.logical_id[1]]
666 if node_uuid not in result:
667 raise errors.ConfigurationError("DRBD device passed unknown node")
668 else:
669 raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
670 return result
671
672 def ComputeNodeTree(self, parent_node_uuid):
673 """Compute the node/disk tree for this disk and its children.
674
675 This method, given the node on which the parent disk lives, will
676 return the list of all (node UUID, disk) pairs which describe the disk
677 tree in the most compact way. For example, a drbd/lvm stack
678 will be returned as (primary_node, drbd) and (secondary_node, drbd)
679 which represents all the top-level devices on the nodes.
680
681 """
682 my_nodes = self.GetNodes(parent_node_uuid)
683 result = [(node, self) for node in my_nodes]
684 if not self.children:
685 # leaf device
686 return result
687 for node in my_nodes:
688 for child in self.children:
689 child_result = child.ComputeNodeTree(node)
690 if len(child_result) == 1:
691 # child (and all its descendants) is simple, doesn't split
692 # over multiple hosts, so we don't need to describe it, our
693 # own entry for this node describes it completely
694 continue
695 else:
696 # check if child nodes differ from my nodes; note that
697 # subdisk can differ from the child itself, and be instead
698 # one of its descendants
699 for subnode, subdisk in child_result:
700 if subnode not in my_nodes:
701 result.append((subnode, subdisk))
702 # otherwise child is under our own node, so we ignore this
703 # entry (but probably the other results in the list will
704 # be different)
705 return result
706
707 def ComputeGrowth(self, amount):
708 """Compute the per-VG growth requirements.
709
710 This only works for VG-based disks.
711
712 @type amount: integer
713 @param amount: the desired increase in (user-visible) disk space
714 @rtype: dict
715 @return: a dictionary of volume-groups and the required size
716
717 """
718 if self.dev_type == constants.DT_PLAIN:
719 return {self.logical_id[0]: amount}
720 elif self.dev_type == constants.DT_DRBD8:
721 if self.children:
722 return self.children[0].ComputeGrowth(amount)
723 else:
724 return {}
725 else:
726 # Other disk types do not require VG space
727 return {}
728
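# Illustrative usage sketch (made-up volume group and LV names): only
# VG-backed disks report a growth requirement.
#   >>> lv = Disk(dev_type=constants.DT_PLAIN, logical_id=("xenvg", "lv0"),
#   ...           size=1024)
#   >>> lv.ComputeGrowth(512)
#   {'xenvg': 512}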
729 def RecordGrow(self, amount):
730 """Update the size of this disk after growth.
731
732 This method recurses over the disk's children and updates their
733 size correspondingly. The method needs to be kept in sync with the
734 actual algorithms from bdev.
735
736 """
737 if self.dev_type in (constants.DT_PLAIN, constants.DT_FILE,
738 constants.DT_RBD, constants.DT_EXT,
739 constants.DT_SHARED_FILE, constants.DT_GLUSTER):
740 self.size += amount
741 elif self.dev_type == constants.DT_DRBD8:
742 if self.children:
743 self.children[0].RecordGrow(amount)
744 self.size += amount
745 else:
746 raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
747 " disk type %s" % self.dev_type)
748
749 def Update(self, size=None, mode=None, spindles=None):
750 """Apply changes to size, spindles and mode.
751
752 """
753 if self.dev_type == constants.DT_DRBD8:
754 if self.children:
755 self.children[0].Update(size=size, mode=mode)
756 else:
757 assert not self.children
758
759 if size is not None:
760 self.size = size
761 if mode is not None:
762 self.mode = mode
763 if spindles is not None:
764 self.spindles = spindles
765
766 def UnsetSize(self):
767 """Sets recursively the size to zero for the disk and its children.
768
769 """
770 if self.children:
771 for child in self.children:
772 child.UnsetSize()
773 self.size = 0
774
775 def UpdateDynamicDiskParams(self, target_node_uuid, nodes_ip):
776 """Updates the dynamic disk params for the given node.
777
778 This is mainly used for drbd, which needs ip/port configuration.
779
780 Arguments:
781 - target_node_uuid: the node UUID we wish to configure for
782 - nodes_ip: a mapping of node UUID to IP
783
784 The target_node_uuid must exist in nodes_ip, and should be one of the
785 nodes in the logical ID if this device is a DRBD device.
786
787 """
788 if self.children:
789 for child in self.children:
790 child.UpdateDynamicDiskParams(target_node_uuid, nodes_ip)
791
792 dyn_disk_params = {}
793 if self.logical_id is not None and self.dev_type in constants.DTS_DRBD:
794 pnode_uuid, snode_uuid, _, pminor, sminor, _ = self.logical_id
795 if target_node_uuid not in (pnode_uuid, snode_uuid):
796 # disk object is being sent to neither the primary nor the secondary
797 # node. reset the dynamic parameters, the target node is not
798 # supposed to use them.
799 self.dynamic_params = dyn_disk_params
800 return
801
802 pnode_ip = nodes_ip.get(pnode_uuid, None)
803 snode_ip = nodes_ip.get(snode_uuid, None)
804 if pnode_ip is None or snode_ip is None:
805 raise errors.ConfigurationError("Can't find primary or secondary node"
806 " for %s" % str(self))
807 if pnode_uuid == target_node_uuid:
808 dyn_disk_params[constants.DDP_LOCAL_IP] = pnode_ip
809 dyn_disk_params[constants.DDP_REMOTE_IP] = snode_ip
810 dyn_disk_params[constants.DDP_LOCAL_MINOR] = pminor
811 dyn_disk_params[constants.DDP_REMOTE_MINOR] = sminor
812 else: # it must be secondary, we tested above
813 dyn_disk_params[constants.DDP_LOCAL_IP] = snode_ip
814 dyn_disk_params[constants.DDP_REMOTE_IP] = pnode_ip
815 dyn_disk_params[constants.DDP_LOCAL_MINOR] = sminor
816 dyn_disk_params[constants.DDP_REMOTE_MINOR] = pminor
817
818 self.dynamic_params = dyn_disk_params
819
820 # pylint: disable=W0221
821 def ToDict(self, include_dynamic_params=False,
822 _with_private=False):
823 """Disk-specific conversion to standard python types.
824
825 This replaces the children lists of objects with lists of
826 standard python types.
827
828 """
829 bo = super(Disk, self).ToDict(_with_private=_with_private)
830 if not include_dynamic_params and "dynamic_params" in bo:
831 del bo["dynamic_params"]
832
833 if _with_private and "logical_id" in bo:
834 mutable_id = list(bo["logical_id"])
835 mutable_id[5] = mutable_id[5].Get()
836 bo["logical_id"] = tuple(mutable_id)
837
838 for attr in ("children",):
839 alist = bo.get(attr, None)
840 if alist:
841 bo[attr] = outils.ContainerToDicts(alist)
842 return bo
843
844 @classmethod
845 def FromDict(cls, val):
846 """Custom function for Disks
847
848 """
849 obj = super(Disk, cls).FromDict(val)
850 if obj.children:
851 obj.children = outils.ContainerFromDicts(obj.children, list, Disk)
852 if obj.logical_id and isinstance(obj.logical_id, list):
853 obj.logical_id = tuple(obj.logical_id)
854 if obj.dev_type in constants.DTS_DRBD:
855 # we need a tuple of length six here
856 if len(obj.logical_id) < 6:
857 obj.logical_id += (None,) * (6 - len(obj.logical_id))
858 # If we do have a tuple of length 6, make the last entry (secret key)
859 # private
860 elif (len(obj.logical_id) == 6 and
861 not isinstance(obj.logical_id[-1], serializer.Private)):
862 obj.logical_id = obj.logical_id[:-1] + \
863 (serializer.Private(obj.logical_id[-1]),)
864 return obj
865
866 def __str__(self):
867 """Custom str() formatter for disks.
868
869 """
870 if self.dev_type == constants.DT_PLAIN:
871 val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
872 elif self.dev_type in constants.DTS_DRBD:
873 node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
874 val = "<DRBD8("
875
876 val += ("hosts=%s/%d-%s/%d, port=%s, " %
877 (node_a, minor_a, node_b, minor_b, port))
878 if self.children and self.children.count(None) == 0:
879 val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
880 else:
881 val += "no local storage"
882 else:
883 val = ("<Disk(type=%s, logical_id=%s, children=%s" %
884 (self.dev_type, self.logical_id, self.children))
885 if self.iv_name is None:
886 val += ", not visible"
887 else:
888 val += ", visible as /dev/%s" % self.iv_name
889 if self.spindles is not None:
890 val += ", spindles=%s" % self.spindles
891 if isinstance(self.size, int):
892 val += ", size=%dm)>" % self.size
893 else:
894 val += ", size='%s')>" % (self.size,)
895 return val
896
897 def Verify(self):
898 """Checks that this disk is correctly configured.
899
900 """
901 all_errors = []
902 if self.mode not in constants.DISK_ACCESS_SET:
903 all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
904 return all_errors
905
906 def UpgradeConfig(self):
907 """Fill defaults for missing configuration values.
908
909 """
910 if self.children:
911 for child in self.children:
912 child.UpgradeConfig()
913
914 # FIXME: Make this configurable in Ganeti 2.7
915 # Params should be an empty dict that gets filled any time needed
916 # In case of the ext template we allow arbitrary params that should not
917 # be overridden during a config reload/upgrade.
918 if not self.params or not isinstance(self.params, dict):
919 self.params = {}
920
921 # add here config upgrade for this disk
922 if self.serial_no is None:
923 self.serial_no = 1
924 if self.mtime is None:
925 self.mtime = time.time()
926 if self.ctime is None:
927 self.ctime = time.time()
928
929 # map of legacy device types (mapping differing LD constants to new
930 # DT constants)
931 LEG_DEV_TYPE_MAP = {"lvm": constants.DT_PLAIN, "drbd8": constants.DT_DRBD8}
932 if self.dev_type in LEG_DEV_TYPE_MAP:
933 self.dev_type = LEG_DEV_TYPE_MAP[self.dev_type]
934
935 @staticmethod
936 def ComputeLDParams(disk_template, disk_params):
937 """Computes Logical Disk parameters from Disk Template parameters.
938
939 @type disk_template: string
940 @param disk_template: disk template, one of L{constants.DISK_TEMPLATES}
941 @type disk_params: dict
942 @param disk_params: disk template parameters;
943 dict(template_name -> parameters)
944 @rtype: list(dict)
945 @return: a list of dicts, one for each node of the disk hierarchy. Each dict
946 contains the LD parameters of the node. The tree is flattened in-order.
947
948 """
949 if disk_template not in constants.DISK_TEMPLATES:
950 raise errors.ProgrammerError("Unknown disk template %s" % disk_template)
951
952 assert disk_template in disk_params
953
954 result = list()
955 dt_params = disk_params[disk_template]
956
957 if disk_template == constants.DT_DRBD8:
958 result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.DT_DRBD8], {
959 constants.LDP_RESYNC_RATE: dt_params[constants.DRBD_RESYNC_RATE],
960 constants.LDP_BARRIERS: dt_params[constants.DRBD_DISK_BARRIERS],
961 constants.LDP_NO_META_FLUSH: dt_params[constants.DRBD_META_BARRIERS],
962 constants.LDP_DEFAULT_METAVG: dt_params[constants.DRBD_DEFAULT_METAVG],
963 constants.LDP_DISK_CUSTOM: dt_params[constants.DRBD_DISK_CUSTOM],
964 constants.LDP_NET_CUSTOM: dt_params[constants.DRBD_NET_CUSTOM],
965 constants.LDP_PROTOCOL: dt_params[constants.DRBD_PROTOCOL],
966 constants.LDP_DYNAMIC_RESYNC: dt_params[constants.DRBD_DYNAMIC_RESYNC],
967 constants.LDP_PLAN_AHEAD: dt_params[constants.DRBD_PLAN_AHEAD],
968 constants.LDP_FILL_TARGET: dt_params[constants.DRBD_FILL_TARGET],
969 constants.LDP_DELAY_TARGET: dt_params[constants.DRBD_DELAY_TARGET],
970 constants.LDP_MAX_RATE: dt_params[constants.DRBD_MAX_RATE],
971 constants.LDP_MIN_RATE: dt_params[constants.DRBD_MIN_RATE],
972 }))
973
974 # data LV
975 result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.DT_PLAIN], {
976 constants.LDP_STRIPES: dt_params[constants.DRBD_DATA_STRIPES],
977 }))
978
979 # metadata LV
980 result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.DT_PLAIN], {
981 constants.LDP_STRIPES: dt_params[constants.DRBD_META_STRIPES],
982 }))
983
984 else:
985 defaults = constants.DISK_LD_DEFAULTS[disk_template]
986 values = {}
987 for field in defaults:
988 values[field] = dt_params[field]
989 result.append(FillDict(defaults, values))
990
991 return result
992
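# Note on the shape of ComputeLDParams' result: for DT_DRBD8 the flattened
# list has three dicts (the DRBD device itself, then the data LV, then the
# metadata LV); for every other template it contains exactly one dict.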
993
994 class InstancePolicy(ConfigObject):
995 """Config object representing instance policy limits dictionary.
996
997 Note that this object is not actually used in the config, it's just
998 used as a placeholder for a few functions.
999
1000 """
1001 @classmethod
1002 def UpgradeDiskTemplates(cls, ipolicy, enabled_disk_templates):
1003 """Upgrades the ipolicy configuration."""
1004 if constants.IPOLICY_DTS in ipolicy:
1005 if not set(ipolicy[constants.IPOLICY_DTS]).issubset(
1006 set(enabled_disk_templates)):
1007 ipolicy[constants.IPOLICY_DTS] = list(
1008 set(ipolicy[constants.IPOLICY_DTS]) & set(enabled_disk_templates))
1009
1010 @classmethod
1011 def CheckParameterSyntax(cls, ipolicy, check_std):
1012 """ Check the instance policy for validity.
1013
1014 @type ipolicy: dict
1015 @param ipolicy: dictionary with min/max/std specs and policies
1016 @type check_std: bool
1017 @param check_std: Whether to check std value or just assume compliance
1018 @raise errors.ConfigurationError: when the policy is not legal
1019
1020 """
1021 InstancePolicy.CheckISpecSyntax(ipolicy, check_std)
1022 if constants.IPOLICY_DTS in ipolicy:
1023 InstancePolicy.CheckDiskTemplates(ipolicy[constants.IPOLICY_DTS])
1024 for key in constants.IPOLICY_PARAMETERS:
1025 if key in ipolicy:
1026 InstancePolicy.CheckParameter(key, ipolicy[key])
1027 wrong_keys = frozenset(ipolicy.keys()) - constants.IPOLICY_ALL_KEYS
1028 if wrong_keys:
1029 raise errors.ConfigurationError("Invalid keys in ipolicy: %s" %
1030 utils.CommaJoin(wrong_keys))
1031
1032 @classmethod
1033 def _CheckIncompleteSpec(cls, spec, keyname):
1034 missing_params = constants.ISPECS_PARAMETERS - frozenset(spec.keys())
1035 if missing_params:
1036 msg = ("Missing instance specs parameters for %s: %s" %
1037 (keyname, utils.CommaJoin(missing_params)))
1038 raise errors.ConfigurationError(msg)
1039
1040 @classmethod
1041 def CheckISpecSyntax(cls, ipolicy, check_std):
1042 """Check the instance policy specs for validity.
1043
1044 @type ipolicy: dict
1045 @param ipolicy: dictionary with min/max/std specs
1046 @type check_std: bool
1047 @param check_std: Whether to check std value or just assume compliance
1048 @raise errors.ConfigurationError: when specs are not valid
1049
1050 """
1051 if constants.ISPECS_MINMAX not in ipolicy:
1052 # Nothing to check
1053 return
1054
1055 if check_std and constants.ISPECS_STD not in ipolicy:
1056 msg = "Missing key in ipolicy: %s" % constants.ISPECS_STD
1057 raise errors.ConfigurationError(msg)
1058 stdspec = ipolicy.get(constants.ISPECS_STD)
1059 if check_std:
1060 InstancePolicy._CheckIncompleteSpec(stdspec, constants.ISPECS_STD)
1061
1062 if not ipolicy[constants.ISPECS_MINMAX]:
1063 raise errors.ConfigurationError("Empty minmax specifications")
1064 std_is_good = False
1065 for minmaxspecs in ipolicy[constants.ISPECS_MINMAX]:
1066 missing = constants.ISPECS_MINMAX_KEYS - frozenset(minmaxspecs.keys())
1067 if missing:
1068 msg = "Missing instance specification: %s" % utils.CommaJoin(missing)
1069 raise errors.ConfigurationError(msg)
1070 for (key, spec) in minmaxspecs.items():
1071 InstancePolicy._CheckIncompleteSpec(spec, key)
1072
1073 spec_std_ok = True
1074 for param in constants.ISPECS_PARAMETERS:
1075 par_std_ok = InstancePolicy._CheckISpecParamSyntax(minmaxspecs, stdspec,
1076 param, check_std)
1077 spec_std_ok = spec_std_ok and par_std_ok
1078 std_is_good = std_is_good or spec_std_ok
1079 if not std_is_good:
1080 raise errors.ConfigurationError("Invalid std specifications")
1081
1082 @classmethod
1083 def _CheckISpecParamSyntax(cls, minmaxspecs, stdspec, name, check_std):
1084 """Check the instance policy specs for validity on a given key.
1085
1086 We check if the instance specs make sense for a given key, that is
1087 if minmaxspecs[min][name] <= stdspec[name] <= minmaxspecs[max][name].
1088
1089 @type minmaxspecs: dict
1090 @param minmaxspecs: dictionary with min and max instance spec
1091 @type stdspec: dict
1092 @param stdspec: dictionary with standard instance spec
1093 @type name: string
1094 @param name: what are the limits for
1095 @type check_std: bool
1096 @param check_std: Whether to check std value or just assume compliance
1097 @rtype: bool
1098 @return: C{True} when specs are valid, C{False} when standard spec for the
1099 given name is not valid
1100 @raise errors.ConfigurationError: when min/max specs for the given name
1101 are not valid
1102
1103 """
1104 minspec = minmaxspecs[constants.ISPECS_MIN]
1105 maxspec = minmaxspecs[constants.ISPECS_MAX]
1106 min_v = minspec[name]
1107 max_v = maxspec[name]
1108
1109 if min_v > max_v:
1110 err = ("Invalid specification of min/max values for %s: %s/%s" %
1111 (name, min_v, max_v))
1112 raise errors.ConfigurationError(err)
1113 elif check_std:
1114 std_v = stdspec.get(name, min_v)
1115 return std_v >= min_v and std_v <= max_v
1116 else:
1117 return True
1118
1119 @classmethod
1120 def CheckDiskTemplates(cls, disk_templates):
1121 """Checks the disk templates for validity.
1122
1123 """
1124 if not disk_templates:
1125 raise errors.ConfigurationError("Instance policy must contain" +
1126 " at least one disk template")
1127 wrong = frozenset(disk_templates).difference(constants.DISK_TEMPLATES)
1128 if wrong:
1129 raise errors.ConfigurationError("Invalid disk template(s) %s" %
1130 utils.CommaJoin(wrong))
1131
1132 @classmethod
1133 def CheckParameter(cls, key, value):
1134 """Checks a parameter.
1135
1136 Currently we expect all parameters to be float values.
1137
1138 """
1139 try:
1140 float(value)
1141 except (TypeError, ValueError), err:
1142 raise errors.ConfigurationError("Invalid value for key '%s':"
1143 " '%s', error: %s" % (key, value, err))
1144
1145
1146 def GetOSImage(osparams):
1147 """Gets the OS image value from the OS parameters.
1148
1149 @type osparams: L{dict} or NoneType
1150 @param osparams: OS parameters or None
1151
1152 @rtype: string or NoneType
1153 @return:
1154 value of OS image contained in OS parameters, or None if the OS
1155 parameters are None or the OS parameters do not contain an OS
1156 image
1157
1158 """
1159 if osparams is None:
1160 return None
1161 else:
1162 return osparams.get("os-image", None)
1163
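# Illustrative usage sketch (made-up image URL):
#   >>> GetOSImage(None) is None
#   True
#   >>> GetOSImage({"os-image": "http://example.com/disk.img"})
#   'http://example.com/disk.img'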
1164
1165 def PutOSImage(osparams, os_image):
1166 """Update OS image value in the OS parameters
1167
1168 @type osparams: L{dict}
1169 @param osparams: OS parameters
1170
1171 @type os_image: string
1172 @param os_image: OS image
1173
1174 @rtype: NoneType
1175 @return: None
1176
1177 """
1178 osparams["os-image"] = os_image
1179
1180
1181 class Instance(TaggableObject):
1182 """Config object representing an instance."""
1183 __slots__ = [
1184 "name",
1185 "primary_node",
1186 "secondary_nodes",
1187 "os",
1188 "hypervisor",
1189 "hvparams",
1190 "beparams",
1191 "osparams",
1192 "osparams_private",
1193 "admin_state",
1194 "admin_state_source",
1195 "nics",
1196 "disks",
1197 "disks_info",
1198 "disk_template",
1199 "disks_active",
1200 "network_port",
1201 "serial_no",
1202 ] + _TIMESTAMPS + _UUID
1203
1204 def FindDisk(self, idx):
1205 """Find a disk given having a specified index.
1206
1207 This is just a wrapper that does validation of the index.
1208
1209 @type idx: int
1210 @param idx: the disk index
1211 @rtype: string
1212 @return: the corresponding disk's uuid
1213 @raise errors.OpPrereqError: when the given index is not valid
1214
1215 """
1216 try:
1217 idx = int(idx)
1218 return self.disks[idx]
1219 except (TypeError, ValueError), err:
1220 raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err),
1221 errors.ECODE_INVAL)
1222 except IndexError:
1223 raise errors.OpPrereqError("Invalid disk index: %d (instance has disks"
1224 " 0 to %d)" % (idx, len(self.disks) - 1),
1225 errors.ECODE_INVAL)
1226
1227 def ToDict(self, _with_private=False):
1228 """Instance-specific conversion to standard python types.
1229
1230 This replaces the children lists of objects with lists of standard
1231 python types.
1232
1233 """
1234 bo = super(Instance, self).ToDict(_with_private=_with_private)
1235
1236 if _with_private:
1237 bo["osparams_private"] = self.osparams_private.Unprivate()
1238
1239 for attr in "nics", :
1240 alist = bo.get(attr, None)
1241 if alist:
1242 nlist = outils.ContainerToDicts(alist)
1243 else:
1244 nlist = []
1245 bo[attr] = nlist
1246 return bo
1247
1248 @classmethod
1249 def FromDict(cls, val):
1250 """Custom function for instances.
1251
1252 """
1253 if "admin_state" not in val:
1254 if val.get("admin_up", False):
1255 val["admin_state"] = constants.ADMINST_UP
1256 else:
1257 val["admin_state"] = constants.ADMINST_DOWN
1258 if "admin_up" in val:
1259 del val["admin_up"]
1260 obj = super(Instance, cls).FromDict(val)
1261 obj.nics = outils.ContainerFromDicts(obj.nics, list, NIC)
1262
1263 # attribute 'disks_info' is only present when deserializing from a RPC
1264 # call in the backend
1265 disks_info = getattr(obj, "disks_info", None)
1266 if disks_info:
1267 obj.disks_info = outils.ContainerFromDicts(disks_info, list, Disk)
1268
1269 return obj
1270
1271 def UpgradeConfig(self):
1272 """Fill defaults for missing configuration values.
1273
1274 """
1275 if self.admin_state_source is None:
1276 self.admin_state_source = constants.ADMIN_SOURCE
1277 for nic in self.nics:
1278 nic.UpgradeConfig()
1279 if self.disks is None:
1280 self.disks = []
1281 if self.hvparams:
1282 for key in constants.HVC_GLOBALS:
1283 try:
1284 del self.hvparams[key]
1285 except KeyError:
1286 pass
1287 if self.osparams is None:
1288 self.osparams = {}
1289 if self.osparams_private is None:
1290 self.osparams_private = serializer.PrivateDict()
1291 UpgradeBeParams(self.beparams)
1292 if self.disks_active is None:
1293 self.disks_active = self.admin_state == constants.ADMINST_UP
1294
1295
1296 class OS(ConfigObject):
1297 """Config object representing an operating system.
1298
1299 @type supported_parameters: list
1300 @ivar supported_parameters: a list of (name, description) tuples
1301 describing the parameters supported by this OS
1302
1303 @type VARIANT_DELIM: string
1304 @cvar VARIANT_DELIM: the variant delimiter
1305
1306 """
1307 __slots__ = [
1308 "name",
1309 "path",
1310 "api_versions",
1311 "create_script",
1312 "create_script_untrusted",
1313 "export_script",
1314 "import_script",
1315 "rename_script",
1316 "verify_script",
1317 "supported_variants",
1318 "supported_parameters",
1319 ]
1320
1321 VARIANT_DELIM = "+"
1322
1323 @classmethod
1324 def SplitNameVariant(cls, name):
1325 """Splits the name into the proper name and variant.
1326
1327 @param name: the OS (unprocessed) name
1328 @rtype: list
1329 @return: a list of two elements; if the original name didn't
1330 contain a variant, the variant is returned as an empty string
1331
1332 """
1333 nv = name.split(cls.VARIANT_DELIM, 1)
1334 if len(nv) == 1:
1335 nv.append("")
1336 return nv
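# Illustrative usage sketch (made-up OS names):
#   >>> OS.SplitNameVariant("debian+squeeze")
#   ['debian', 'squeeze']
#   >>> OS.SplitNameVariant("debian")
#   ['debian', '']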
1337
1338 @classmethod
1339 def GetName(cls, name):
1340 """Returns the proper name of the os (without the variant).
1341
1342 @param name: the OS (unprocessed) name
1343
1344 """
1345 return cls.SplitNameVariant(name)[0]
1346
1347 @classmethod
1348 def GetVariant(cls, name):
1349 """Returns the variant the os (without the base name).
1350
1351 @param name: the OS (unprocessed) name
1352
1353 """
1354 return cls.SplitNameVariant(name)[1]
1355
1356 def IsTrusted(self):
1357 """Returns whether this OS is trusted.
1358
1359 @rtype: bool
1360 @return: L{True} if this OS is trusted, L{False} otherwise
1361
1362 """
1363 return not self.create_script_untrusted
1364
1365
1366 class ExtStorage(ConfigObject):
1367 """Config object representing an External Storage Provider.
1368
1369 """
1370 __slots__ = [
1371 "name",
1372 "path",
1373 "create_script",
1374 "remove_script",
1375 "grow_script",
1376 "attach_script",
1377 "detach_script",
1378 "setinfo_script",
1379 "verify_script",
1380 "snapshot_script",
1381 "supported_parameters",
1382 ]
1383
1384
1385 class NodeHvState(ConfigObject):
1386 """Hypvervisor state on a node.
1387
1388 @ivar mem_total: Total amount of memory
1389 @ivar mem_node: Memory used by, or reserved for, the node itself (not always
1390 available)
1391 @ivar mem_hv: Memory used by hypervisor or lost due to instance allocation
1392 rounding
1393 @ivar mem_inst: Memory used by instances living on node
1394 @ivar cpu_total: Total node CPU core count
1395 @ivar cpu_node: Number of CPU cores reserved for the node itself
1396
1397 """
1398 __slots__ = [
1399 "mem_total",
1400 "mem_node",
1401 "mem_hv",
1402 "mem_inst",
1403 "cpu_total",
1404 "cpu_node",
1405 ] + _TIMESTAMPS
1406
1407
1408 class NodeDiskState(ConfigObject):
1409 """Disk state on a node.
1410
1411 """
1412 __slots__ = [
1413 "total",
1414 "reserved",
1415 "overhead",
1416 ] + _TIMESTAMPS
1417
1418
1419 class Node(TaggableObject):
1420 """Config object representing a node.
1421
1422 @ivar hv_state: Hypervisor state (e.g. number of CPUs)
1423 @ivar hv_state_static: Hypervisor state overridden by user
1424 @ivar disk_state: Disk state (e.g. free space)
1425 @ivar disk_state_static: Disk state overridden by user
1426
1427 """
1428 __slots__ = [
1429 "name",
1430 "primary_ip",
1431 "secondary_ip",
1432 "serial_no",
1433 "master_candidate",
1434 "offline",
1435 "drained",
1436 "group",
1437 "master_capable",
1438 "vm_capable",
1439 "ndparams",
1440 "powered",
1441 "hv_state",
1442 "hv_state_static",
1443 "disk_state",
1444 "disk_state_static",
1445 ] + _TIMESTAMPS + _UUID
1446
1447 def UpgradeConfig(self):
1448 """Fill defaults for missing configuration values.
1449
1450 """
1451 # pylint: disable=E0203
1452 # because these are "defined" via slots, not manually
1453 if self.master_capable is None:
1454 self.master_capable = True
1455
1456 if self.vm_capable is None:
1457 self.vm_capable = True
1458
1459 if self.ndparams is None:
1460 self.ndparams = {}
1461 # And remove any global parameter
1462 for key in constants.NDC_GLOBALS:
1463 if key in self.ndparams:
1464 logging.warning("Ignoring %s node parameter for node %s",
1465 key, self.name)
1466 del self.ndparams[key]
1467
1468 if self.powered is None:
1469 self.powered = True
1470
1471 def ToDict(self, _with_private=False):
1472 """Custom function for serializing.
1473
1474 """
1475 data = super(Node, self).ToDict(_with_private=_with_private)
1476
1477 hv_state = data.get("hv_state", None)
1478 if hv_state is not None:
1479 data["hv_state"] = outils.ContainerToDicts(hv_state)
1480
1481 disk_state = data.get("disk_state", None)
1482 if disk_state is not None:
1483 data["disk_state"] = \
1484 dict((key, outils.ContainerToDicts(value))
1485 for (key, value) in disk_state.items())
1486
1487 return data
1488
1489 @classmethod
1490 def FromDict(cls, val):
1491 """Custom function for deserializing.
1492
1493 """
1494 obj = super(Node, cls).FromDict(val)
1495
1496 if obj.hv_state is not None:
1497 obj.hv_state = \
1498 outils.ContainerFromDicts(obj.hv_state, dict, NodeHvState)
1499
1500 if obj.disk_state is not None:
1501 obj.disk_state = \
1502 dict((key, outils.ContainerFromDicts(value, dict, NodeDiskState))
1503 for (key, value) in obj.disk_state.items())
1504
1505 return obj
1506
1507
1508 class NodeGroup(TaggableObject):
1509 """Config object representing a node group."""
1510 __slots__ = [
1511 "name",
1512 "members",
1513 "ndparams",
1514 "diskparams",
1515 "ipolicy",
1516 "serial_no",
1517 "hv_state_static",
1518 "disk_state_static",
1519 "alloc_policy",
1520 "networks",
1521 ] + _TIMESTAMPS + _UUID
1522
1523 def ToDict(self, _with_private=False):
1524 """Custom function for nodegroup.
1525
1526 This discards the members object, which gets recalculated and is only kept
1527 in memory.
1528
1529 """
1530 mydict = super(NodeGroup, self).ToDict(_with_private=_with_private)
1531 del mydict["members"]
1532 return mydict
1533
1534 @classmethod
1535 def FromDict(cls, val):
1536 """Custom function for nodegroup.
1537
1538 The members slot is initialized to an empty list upon deserialization.
1539
1540 """
1541 obj = super(NodeGroup, cls).FromDict(val)
1542 obj.members = []
1543 return obj
1544
1545 def UpgradeConfig(self):
1546 """Fill defaults for missing configuration values.
1547
1548 """
1549 if self.ndparams is None:
1550 self.ndparams = {}
1551
1552 if self.serial_no is None:
1553 self.serial_no = 1
1554
1555 if self.alloc_policy is None:
1556 self.alloc_policy = constants.ALLOC_POLICY_PREFERRED
1557
1558 # We only update mtime, and not ctime, since we would not be able
1559 # to provide a correct value for creation time.
1560 if self.mtime is None:
1561 self.mtime = time.time()
1562
1563 if self.diskparams is None:
1564 self.diskparams = {}
1565 if self.ipolicy is None:
1566 self.ipolicy = MakeEmptyIPolicy()
1567
1568 if self.networks is None:
1569 self.networks = {}
1570
1571 for network, netparams in self.networks.items():
1572 self.networks[network] = FillDict(constants.NICC_DEFAULTS, netparams)
1573
1574 def FillND(self, node):
1575 """Return filled out ndparams for L{objects.Node}
1576
1577 @type node: L{objects.Node}
1578 @param node: A Node object to fill
1579 @return: a copy of the node's ndparams with defaults filled
1580
1581 """
1582 return self.SimpleFillND(node.ndparams)
1583
1584 def SimpleFillND(self, ndparams):
1585 """Fill a given ndparams dict with defaults.
1586
1587 @type ndparams: dict
1588 @param ndparams: the dict to fill
1589 @rtype: dict
1590 @return: a copy of the passed in ndparams with missing keys filled
1591 from the node group defaults
1592
1593 """
1594 return FillDict(self.ndparams, ndparams)
1595
1596
1597 class Cluster(TaggableObject):
1598 """Config object representing the cluster."""
1599 __slots__ = [
1600 "serial_no",
1601 "rsahostkeypub",
1602 "dsahostkeypub",
1603 "highest_used_port",
1604 "tcpudp_port_pool",
1605 "mac_prefix",
1606 "volume_group_name",
1607 "reserved_lvs",
1608 "drbd_usermode_helper",
1609 "default_bridge",
1610 "default_hypervisor",
1611 "master_node",
1612 "master_ip",
1613 "master_netdev",
1614 "master_netmask",
1615 "use_external_mip_script",
1616 "cluster_name",
1617 "file_storage_dir",
1618 "shared_file_storage_dir",
1619 "gluster_storage_dir",
1620 "enabled_hypervisors",
1621 "hvparams",
1622 "ipolicy",
1623 "os_hvp",
1624 "beparams",
1625 "osparams",
1626 "osparams_private_cluster",
1627 "nicparams",
1628 "ndparams",
1629 "diskparams",
1630 "candidate_pool_size",
1631 "modify_etc_hosts",
1632 "modify_ssh_setup",
1633 "maintain_node_health",
1634 "uid_pool",
1635 "default_iallocator",
1636 "default_iallocator_params",
1637 "hidden_os",
1638 "blacklisted_os",
1639 "primary_ip_family",
1640 "prealloc_wipe_disks",
1641 "hv_state_static",
1642 "disk_state_static",
1643 "enabled_disk_templates",
1644 "candidate_certs",
1645 "max_running_jobs",
1646 "max_tracked_jobs",
1647 "install_image",
1648 "instance_communication_network",
1649 "zeroing_image",
1650 "compression_tools",
1651 "enabled_user_shutdown",
1652 "data_collectors",
1653 ] + _TIMESTAMPS + _UUID
1654
1655 def UpgradeConfig(self):
1656 """Fill defaults for missing configuration values.
1657
1658 """
1659 # pylint: disable=E0203
1660 # because these are "defined" via slots, not manually
1661 if self.hvparams is None:
1662 self.hvparams = constants.HVC_DEFAULTS
1663 else:
1664 for hypervisor in constants.HYPER_TYPES:
1665 try:
1666 existing_params = self.hvparams[hypervisor]
1667 except KeyError:
1668 existing_params = {}
1669 self.hvparams[hypervisor] = FillDict(
1670 constants.HVC_DEFAULTS[hypervisor], existing_params)
1671
1672 if self.os_hvp is None:
1673 self.os_hvp = {}
1674
1675 if self.osparams is None:
1676 self.osparams = {}
1677 # osparams_private_cluster added in 2.12
1678 if self.osparams_private_cluster is None:
1679 self.osparams_private_cluster = {}
1680
1681 self.ndparams = UpgradeNDParams(self.ndparams)
1682
1683 self.beparams = UpgradeGroupedParams(self.beparams,
1684 constants.BEC_DEFAULTS)
1685 for beparams_group in self.beparams:
1686 UpgradeBeParams(self.beparams[beparams_group])
1687
1688 migrate_default_bridge = not self.nicparams
1689 self.nicparams = UpgradeGroupedParams(self.nicparams,
1690 constants.NICC_DEFAULTS)
1691 if migrate_default_bridge:
1692 self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
1693 self.default_bridge
1694
1695 if self.modify_etc_hosts is None:
1696 self.modify_etc_hosts = True
1697
1698 if self.modify_ssh_setup is None:
1699 self.modify_ssh_setup = True
1700
1701 # default_bridge is no longer used in 2.1. The slot is left there to
1702 # support auto-upgrading. It can be removed once we decide to deprecate
1703 # upgrading straight from 2.0.
1704 if self.default_bridge is not None:
1705 self.default_bridge = None
1706
1707 # default_hypervisor is just the first enabled one in 2.1. This slot and
1708 # code can be removed once upgrading straight from 2.0 is deprecated.
1709 if self.default_hypervisor is not None:
1710 self.enabled_hypervisors = ([self.default_hypervisor] +
1711 [hvname for hvname in self.enabled_hypervisors
1712 if hvname != self.default_hypervisor])
1713 self.default_hypervisor = None
1714
1715 # maintain_node_health added after 2.1.1
1716 if self.maintain_node_health is None:
1717 self.maintain_node_health = False
1718
1719 if self.uid_pool is None:
1720 self.uid_pool = []
1721
1722 if self.default_iallocator is None:
1723 self.default_iallocator = ""
1724
1725 if self.default_iallocator_params is None:
1726 self.default_iallocator_params = {}
1727
1728 # reserved_lvs added before 2.2
1729 if self.reserved_lvs is None:
1730 self.reserved_lvs = []
1731
1732 # hidden and blacklisted operating systems added before 2.2.1
1733 if self.hidden_os is None:
1734 self.hidden_os = []
1735
1736 if self.blacklisted_os is None:
1737 self.blacklisted_os = []
1738
1739 # primary_ip_family added before 2.3
1740 if self.primary_ip_family is None:
1741 self.primary_ip_family = AF_INET
1742
1743 if self.master_netmask is None:
1744 ipcls = netutils.IPAddress.GetClassFromIpFamily(self.primary_ip_family)
1745 self.master_netmask = ipcls.iplen
1746
1747 if self.prealloc_wipe_disks is None:
1748 self.prealloc_wipe_disks = False
1749
1750 # shared_file_storage_dir added before 2.5
1751 if self.shared_file_storage_dir is None:
1752 self.shared_file_storage_dir = ""
1753
1754 # gluster_storage_dir added in 2.11
1755 if self.gluster_storage_dir is None:
1756 self.gluster_storage_dir = ""
1757
1758 if self.use_external_mip_script is None:
1759 self.use_external_mip_script = False
1760
1761 if self.diskparams:
1762 self.diskparams = UpgradeDiskParams(self.diskparams)
1763 else:
1764 self.diskparams = constants.DISK_DT_DEFAULTS.copy()
1765
1766 # instance policy added before 2.6
1767 if self.ipolicy is None:
1768 self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, {})
1769 else:
1770 # we can either make sure to upgrade the ipolicy always, or only
1771 # do it in some corner cases (e.g. missing keys); note that this
1772 # will break any removal of keys from the ipolicy dict
1773 wrongkeys = frozenset(self.ipolicy.keys()) - constants.IPOLICY_ALL_KEYS
1774 if wrongkeys:
1775 # These keys would be silently removed by FillIPolicy()
1776 msg = ("Cluster instance policy contains spurious keys: %s" %
1777 utils.CommaJoin(wrongkeys))
1778 raise errors.ConfigurationError(msg)
1779 self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, self.ipolicy)
1780
1781 # hv_state_static added in 2.7
1782 if self.hv_state_static is None:
1783 self.hv_state_static = {}
1784 if self.disk_state_static is None:
1785 self.disk_state_static = {}
1786
1787 if self.candidate_certs is None:
1788 self.candidate_certs = {}
1789
1790 if self.max_running_jobs is None:
1791 self.max_running_jobs = constants.LUXID_MAXIMAL_RUNNING_JOBS_DEFAULT
1792
1793 if self.max_tracked_jobs is None:
1794 self.max_tracked_jobs = constants.LUXID_MAXIMAL_TRACKED_JOBS_DEFAULT
1795
1796 if self.instance_communication_network is None:
1797 self.instance_communication_network = ""
1798
1799 if self.install_image is None:
1800 self.install_image = ""
1801
1802 if self.compression_tools is None:
1803 self.compression_tools = constants.IEC_DEFAULT_TOOLS
1804
1805 if self.enabled_user_shutdown is None:
1806 self.enabled_user_shutdown = False
1807
1808 @property
1809 def primary_hypervisor(self):
1810 """The first hypervisor is the primary.
1811
1812 Useful, for example, for L{Node}'s hv/disk state.
1813
1814 """
1815 return self.enabled_hypervisors[0]
1816
1817 def ToDict(self, _with_private=False):
1818 """Custom function for cluster.
1819
1820 """
1821 mydict = super(Cluster, self).ToDict(_with_private=_with_private)
1822
1823 # Explicitly save private parameters.
1824 if _with_private:
1825 for os in mydict["osparams_private_cluster"]:
1826 mydict["osparams_private_cluster"][os] = \
1827 self.osparams_private_cluster[os].Unprivate()
1828
1829 if self.tcpudp_port_pool is None:
1830 tcpudp_port_pool = []
1831 else:
1832 tcpudp_port_pool = list(self.tcpudp_port_pool)
1833
1834 mydict["tcpudp_port_pool"] = tcpudp_port_pool
1835
1836 return mydict
1837
1838 @classmethod
1839 def FromDict(cls, val):
1840 """Custom function for cluster.
1841
1842 """
1843 obj = super(Cluster, cls).FromDict(val)
1844
1845 if obj.tcpudp_port_pool is None:
1846 obj.tcpudp_port_pool = set()
1847 elif not isinstance(obj.tcpudp_port_pool, set):
1848 obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)
1849
1850 return obj
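
  # Serialization note: tcpudp_port_pool is kept as a set in memory but is
  # written out as a list by ToDict, and FromDict converts it back, e.g.
  # (sketch, assuming the pool is non-None):
  #   Cluster.FromDict(cluster.ToDict()).tcpudp_port_pool
  #     == set(cluster.tcpudp_port_pool)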
1851
1852 def SimpleFillDP(self, diskparams):
1853 """Fill a given diskparams dict with cluster defaults.
1854
    @type diskparams: dict
    @param diskparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in diskparams with missing keys filled
      from the cluster defaults
1857
1858 """
1859 return FillDiskParams(self.diskparams, diskparams)
1860
1861 def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
1862 """Get the default hypervisor parameters for the cluster.
1863
1864 @param hypervisor: the hypervisor name
1865 @param os_name: if specified, we'll also update the defaults for this OS
1866 @param skip_keys: if passed, list of keys not to use
1867 @return: the defaults dict
1868
1869 """
1870 if skip_keys is None:
1871 skip_keys = []
1872
1873 fill_stack = [self.hvparams.get(hypervisor, {})]
1874 if os_name is not None:
1875 os_hvp = self.os_hvp.get(os_name, {}).get(hypervisor, {})
1876 fill_stack.append(os_hvp)
1877
1878 ret_dict = {}
1879 for o_dict in fill_stack:
1880 ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys)
1881
1882 return ret_dict
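
  # A minimal illustration (hypothetical values) of the fill order used
  # above: cluster-wide hvparams are applied first, then the per-OS
  # overrides from os_hvp, so the OS-specific value wins:
  #   cluster.hvparams = {"kvm": {"kernel_path": "/vmlinuz", "acpi": True}}
  #   cluster.os_hvp = {"debian": {"kvm": {"acpi": False}}}
  #   cluster.GetHVDefaults("kvm", os_name="debian")
  #     => {"kernel_path": "/vmlinuz", "acpi": False}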
1883
1884 def SimpleFillHV(self, hv_name, os_name, hvparams, skip_globals=False):
1885 """Fill a given hvparams dict with cluster defaults.
1886
1887 @type hv_name: string
1888 @param hv_name: the hypervisor to use
1889 @type os_name: string
1890 @param os_name: the OS to use for overriding the hypervisor defaults
1891 @type skip_globals: boolean
1892 @param skip_globals: if True, the global hypervisor parameters will
1893 not be filled
1894 @rtype: dict
1895 @return: a copy of the given hvparams with missing keys filled from
1896 the cluster defaults
1897
1898 """
1899 if skip_globals:
1900 skip_keys = constants.HVC_GLOBALS
1901 else:
1902 skip_keys = []
1903
1904 def_dict = self.GetHVDefaults(hv_name, os_name, skip_keys=skip_keys)
1905 return FillDict(def_dict, hvparams, skip_keys=skip_keys)
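
  # Sketch of the skip_globals behaviour (hypothetical values, assuming
  # "migration_port" is among constants.HVC_GLOBALS): global keys are
  # dropped from both the defaults and the passed-in dict, so they cannot
  # be overridden per instance through this path:
  #   cluster.SimpleFillHV("kvm", "debian", {"migration_port": 9999},
  #                        skip_globals=True)
  #     => the returned dict contains no "migration_port" (or other global) key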
1906
1907 def FillHV(self, instance, skip_globals=False):
1908 """Fill an instance's hvparams dict with cluster defaults.
1909
1910 @type instance: L{objects.Instance}
1911 @param instance: the instance parameter to fill
1912 @type skip_globals: boolean
1913 @param skip_globals: if True, the global hypervisor parameters will
1914 not be filled
1915 @rtype: dict
1916 @return: a copy of the instance's hvparams with missing keys filled from
1917 the cluster defaults
1918
1919 """
1920 return self.SimpleFillHV(instance.hypervisor, instance.os,
1921 instance.hvparams, skip_globals)
1922
1923 def SimpleFillBE(self, beparams):
1924 """Fill a given beparams dict with cluster defaults.
1925
1926 @type beparams: dict
1927 @param beparams: the dict to fill
1928 @rtype: dict
1929 @return: a copy of the passed in beparams with missing keys filled
1930 from the cluster defaults
1931
1932 """
1933 return FillDict(self.beparams.get(constants.PP_DEFAULT, {}), beparams)
1934
1935 def FillBE(self, instance):
1936 """Fill an instance's beparams dict with cluster defaults.
1937
1938 @type instance: L{objects.Instance}
1939 @param instance: the instance parameter to fill
1940 @rtype: dict
1941 @return: a copy of the instance's beparams with missing keys filled from
1942 the cluster defaults
1943
1944 """
1945 return self.SimpleFillBE(instance.beparams)
1946
1947 def SimpleFillNIC(self, nicparams):
1948 """Fill a given nicparams dict with cluster defaults.
1949
1950 @type nicparams: dict
1951 @param nicparams: the dict to fill
1952 @rtype: dict
1953 @return: a copy of the passed in nicparams with missing keys filled
1954 from the cluster defaults
1955
1956 """
1957 return FillDict(self.nicparams.get(constants.PP_DEFAULT, {}), nicparams)
1958
1959 def SimpleFillOS(self, os_name,
1960 os_params_public,
1961 os_params_private=None,
1962 os_params_secret=None):
1963 """Fill an instance's osparams dict with cluster defaults.
1964
1965 @type os_name: string
1966 @param os_name: the OS name to use
1967 @type os_params_public: dict
1968 @param os_params_public: the dict to fill with default values
1969 @type os_params_private: dict
1970 @param os_params_private: the dict with private fields to fill
1971 with default values. Not passing this field
1972 results in no private fields being added to the
1973 return value. Private fields will be wrapped in
1974 L{Private} objects.
1975 @type os_params_secret: dict
1976 @param os_params_secret: the dict with secret fields to fill
1977 with default values. Not passing this field
1978 results in no secret fields being added to the
                             return value. Secret fields will be wrapped in
                             L{Private} objects.
1981 @rtype: dict
1982 @return: a copy of the instance's osparams with missing keys filled from
1983 the cluster defaults. Private and secret parameters are not included
1984 unless the respective optional parameters are supplied.
1985
1986 """
1987 if os_name is None:
1988 name_only = None
1989 else:
1990 name_only = OS.GetName(os_name)
1991
1992 defaults_base_public = self.osparams.get(name_only, {})
1993 defaults_public = FillDict(defaults_base_public,
1994 self.osparams.get(os_name, {}))
1995 params_public = FillDict(defaults_public, os_params_public)
1996
1997 if os_params_private is not None:
1998 defaults_base_private = self.osparams_private_cluster.get(name_only, {})
1999 defaults_private = FillDict(defaults_base_private,
2000 self.osparams_private_cluster.get(os_name,
2001 {}))
2002 params_private = FillDict(defaults_private, os_params_private)
2003 else:
2004 params_private = {}
2005
2006 if os_params_secret is not None:
2007 # There can't be default secret settings, so there's nothing to be done.
2008 params_secret = os_params_secret
2009 else:
2010 params_secret = {}
2011
    # Enforce that the public, private and secret key sets are disjoint:
2013 duplicate_keys = utils.GetRepeatedKeys(params_public,
2014 params_private,
2015 params_secret)
2016 if not duplicate_keys:
2017
2018 # Actually update them:
2019 params_public.update(params_private)
2020 params_public.update(params_secret)
2021
2022 return params_public
2023
2024 else:
2025
2026 def formatter(keys):
2027 return utils.CommaJoin(sorted(map(repr, keys))) if keys else "(none)"
2028
      # Lose the values; only the key sets matter from here on.
2030 params_public = set(params_public)
2031 params_private = set(params_private)
2032 params_secret = set(params_secret)
2033
2034 msg = """Cannot assign multiple values to OS parameters.
2035
2036 Conflicting OS parameters that would have been set by this operation:
2037 - at public visibility: {public}
2038 - at private visibility: {private}
2039 - at secret visibility: {secret}
2040 """.format(dupes=formatter(duplicate_keys),
2041 public=formatter(params_public & duplicate_keys),
2042 private=formatter(params_private & duplicate_keys),
2043 secret=formatter(params_secret & duplicate_keys))
2044 raise errors.OpPrereqError(msg)
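
  # A minimal sketch (hypothetical OS and parameter names) of the layering
  # done above: variant-less defaults are applied first, then defaults for
  # the full variant name, then the per-instance values:
  #   cluster.osparams = {"debian": {"mirror": "a"},
  #                       "debian+testing": {"mirror": "b"}}
  #   cluster.SimpleFillOS("debian+testing", {"extra_pkgs": "vim"})
  #     => {"mirror": "b", "extra_pkgs": "vim"}
  # Private parameters get the same default layering; secret ones have no
  # cluster defaults. The three key sets must be disjoint, otherwise
  # OpPrereqError is raised.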
2045
2046 @staticmethod
2047 def SimpleFillHvState(hv_state):
2048 """Fill an hv_state sub dict with cluster defaults.
2049
2050 """
2051 return FillDict(constants.HVST_DEFAULTS, hv_state)
2052
2053 @staticmethod
2054 def SimpleFillDiskState(disk_state):
    """Fill a disk_state sub dict with cluster defaults.
2056
2057 """
2058 return FillDict(constants.DS_DEFAULTS, disk_state)
2059
2060 def FillND(self, node, nodegroup):
2061 """Return filled out ndparams for L{objects.NodeGroup} and L{objects.Node}
2062
2063 @type node: L{objects.Node}
2064 @param node: A Node object to fill
2065 @type nodegroup: L{objects.NodeGroup}
    @param nodegroup: A NodeGroup object to fill
    @return: a copy of the node's ndparams with defaults filled
2068
2069 """
2070 return self.SimpleFillND(nodegroup.FillND(node))
2071
2072 def FillNDGroup(self, nodegroup):
2073 """Return filled out ndparams for just L{objects.NodeGroup}
2074
2075 @type nodegroup: L{objects.NodeGroup}
    @param nodegroup: A NodeGroup object to fill
    @return: a copy of the node group's ndparams with defaults filled
2078
2079 """
2080 return self.SimpleFillND(nodegroup.SimpleFillND({}))
2081
2082 def SimpleFillND(self, ndparams):
2083 """Fill a given ndparams dict with defaults.
2084
2085 @type ndparams: dict
2086 @param ndparams: the dict to fill
2087 @rtype: dict
2088 @return: a copy of the passed in ndparams with missing keys filled
2089 from the cluster defaults
2090
2091 """
2092 return FillDict(self.ndparams, ndparams)
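
  # Illustrative precedence (hypothetical values) for FillND/SimpleFillND:
  # node values override node group values, which override cluster values:
  #   cluster.ndparams = {"oob_program": "", "spindle_count": 1}
  #   group.ndparams = {"spindle_count": 2}
  #   node.ndparams = {}
  #   cluster.FillND(node, group) => {"oob_program": "", "spindle_count": 2}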
2093
2094 def SimpleFillIPolicy(self, ipolicy):
    """Fill instance policy dict with defaults.
2096
2097 @type ipolicy: dict
2098 @param ipolicy: the dict to fill
2099 @rtype: dict
2100 @return: a copy of passed ipolicy with missing keys filled from
2101 the cluster defaults
2102
2103 """
2104 return FillIPolicy(self.ipolicy, ipolicy)
2105
2106 def IsDiskTemplateEnabled(self, disk_template):
2107 """Checks if a particular disk template is enabled.
2108
2109 """
2110 return utils.storage.IsDiskTemplateEnabled(
2111 disk_template, self.enabled_disk_templates)
2112
2113 def IsFileStorageEnabled(self):
2114 """Checks if file storage is enabled.
2115
2116 """
2117 return utils.storage.IsFileStorageEnabled(self.enabled_disk_templates)
2118
2119 def IsSharedFileStorageEnabled(self):
2120 """Checks if shared file storage is enabled.
2121
2122 """
2123 return utils.storage.IsSharedFileStorageEnabled(
2124 self.enabled_disk_templates)
2125
2126
2127 class BlockDevStatus(ConfigObject):
2128 """Config object representing the status of a block device."""
2129 __slots__ = [
2130 "dev_path",
2131 "major",
2132 "minor",
2133 "sync_percent",
2134 "estimated_time",
2135 "is_degraded",
2136 "ldisk_status",
2137 ]
2138
2139
2140 class ImportExportStatus(ConfigObject):
2141 """Config object representing the status of an import or export."""
2142 __slots__ = [
2143 "recent_output",
2144 "listen_port",
2145 "connected",
2146 "progress_mbytes",
2147 "progress_throughput",
2148 "progress_eta",
2149 "progress_percent",
2150 "exit_status",
2151 "error_message",
2152 ] + _TIMESTAMPS
2153
2154
2155 class ImportExportOptions(ConfigObject):
2156 """Options for import/export daemon
2157
2158 @ivar key_name: X509 key name (None for cluster certificate)
2159 @ivar ca_pem: Remote peer CA in PEM format (None for cluster certificate)
2160 @ivar compress: Compression tool to use
2161 @ivar magic: Used to ensure the connection goes to the right disk
2162 @ivar ipv6: Whether to use IPv6
2163 @ivar connect_timeout: Number of seconds for establishing connection
2164
2165 """
2166 __slots__ = [
2167 "key_name",
2168 "ca_pem",
2169 "compress",
2170 "magic",
2171 "ipv6",
2172 "connect_timeout",
2173 ]
2174
2175
2176 class ConfdRequest(ConfigObject):
2177 """Object holding a confd request.
2178
2179 @ivar protocol: confd protocol version
2180 @ivar type: confd query type
2181 @ivar query: query request
2182 @ivar rsalt: requested reply salt
2183
2184 """
2185 __slots__ = [
2186 "protocol",
2187 "type",
2188 "query",
2189 "rsalt",
2190 ]
2191
2192
2193 class ConfdReply(ConfigObject):
2194 """Object holding a confd reply.
2195
2196 @ivar protocol: confd protocol version
2197 @ivar status: reply status code (ok, error)
2198 @ivar answer: confd query reply
2199 @ivar serial: configuration serial number
2200
2201 """
2202 __slots__ = [
2203 "protocol",
2204 "status",
2205 "answer",
2206 "serial",
2207 ]
2208
2209
2210 class QueryFieldDefinition(ConfigObject):
2211 """Object holding a query field definition.
2212
2213 @ivar name: Field name
2214 @ivar title: Human-readable title
2215 @ivar kind: Field type
2216 @ivar doc: Human-readable description
2217
2218 """
2219 __slots__ = [
2220 "name",
2221 "title",
2222 "kind",
2223 "doc",
2224 ]
2225
2226
2227 class _QueryResponseBase(ConfigObject):
2228 __slots__ = [
2229 "fields",
2230 ]
2231
2232 def ToDict(self, _with_private=False):
2233 """Custom function for serializing.
2234
2235 """
2236 mydict = super(_QueryResponseBase, self).ToDict()
2237 mydict["fields"] = outils.ContainerToDicts(mydict["fields"])
2238 return mydict
2239
2240 @classmethod
2241 def FromDict(cls, val):
2242 """Custom function for de-serializing.
2243
2244 """
2245 obj = super(_QueryResponseBase, cls).FromDict(val)
2246 obj.fields = \
2247 outils.ContainerFromDicts(obj.fields, list, QueryFieldDefinition)
2248 return obj
2249
2250
2251 class QueryResponse(_QueryResponseBase):
2252 """Object holding the response to a query.
2253
2254 @ivar fields: List of L{QueryFieldDefinition} objects
2255 @ivar data: Requested data
2256
2257 """
2258 __slots__ = [
2259 "data",
2260 ]
2261
2262
2263 class QueryFieldsRequest(ConfigObject):
2264 """Object holding a request for querying available fields.
2265
2266 """
2267 __slots__ = [
2268 "what",
2269 "fields",
2270 ]
2271
2272
2273 class QueryFieldsResponse(_QueryResponseBase):
2274 """Object holding the response to a query for fields.
2275
2276 @ivar fields: List of L{QueryFieldDefinition} objects
2277
2278 """
2279 __slots__ = []
2280
2281
2282 class MigrationStatus(ConfigObject):
2283 """Object holding the status of a migration.
2284
2285 """
2286 __slots__ = [
2287 "status",
2288 "transferred_ram",
2289 "total_ram",
2290 ]
2291
2292
2293 class InstanceConsole(ConfigObject):
2294 """Object describing how to access the console of an instance.
2295
2296 """
2297 __slots__ = [
2298 "instance",
2299 "kind",
2300 "message",
2301 "host",
2302 "port",
2303 "user",
2304 "command",
2305 "display",
2306 ]
2307
2308 def Validate(self):
2309 """Validates contents of this object.
2310
2311 """
2312 assert self.kind in constants.CONS_ALL, "Unknown console type"
2313 assert self.instance, "Missing instance name"
2314 assert self.message or self.kind in [constants.CONS_SSH,
2315 constants.CONS_SPICE,
2316 constants.CONS_VNC]
2317 assert self.host or self.kind == constants.CONS_MESSAGE
2318 assert self.port or self.kind in [constants.CONS_MESSAGE,
2319 constants.CONS_SSH]
2320 assert self.user or self.kind in [constants.CONS_MESSAGE,
2321 constants.CONS_SPICE,
2322 constants.CONS_VNC]
2323 assert self.command or self.kind in [constants.CONS_MESSAGE,
2324 constants.CONS_SPICE,
2325 constants.CONS_VNC]
2326 assert self.display or self.kind in [constants.CONS_MESSAGE,
2327 constants.CONS_SPICE,
2328 constants.CONS_SSH]
2329
2330
2331 class Network(TaggableObject):
2332 """Object representing a network definition for ganeti.
2333
2334 """
2335 __slots__ = [
2336 "name",
2337 "serial_no",
2338 "mac_prefix",
2339 "network",
2340 "network6",
2341 "gateway",
2342 "gateway6",
2343 "reservations",
2344 "ext_reservations",
2345 ] + _TIMESTAMPS + _UUID
2346
2347 def HooksDict(self, prefix=""):
2348 """Export a dictionary used by hooks with a network's information.
2349
    @type prefix: string
2351 @param prefix: Prefix to prepend to the dict entries
2352
2353 """
2354 result = {
2355 "%sNETWORK_NAME" % prefix: self.name,
2356 "%sNETWORK_UUID" % prefix: self.uuid,
2357 "%sNETWORK_TAGS" % prefix: " ".join(self.GetTags()),
2358 }
2359 if self.network:
2360 result["%sNETWORK_SUBNET" % prefix] = self.network
2361 if self.gateway:
2362 result["%sNETWORK_GATEWAY" % prefix] = self.gateway
2363 if self.network6:
2364 result["%sNETWORK_SUBNET6" % prefix] = self.network6
2365 if self.gateway6:
2366 result["%sNETWORK_GATEWAY6" % prefix] = self.gateway6
2367 if self.mac_prefix:
2368 result["%sNETWORK_MAC_PREFIX" % prefix] = self.mac_prefix
2369
2370 return result
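
  # Example of the resulting hook environment entries (hypothetical
  # network values and prefix):
  #   net.HooksDict("NEW_") might yield
  #     {"NEW_NETWORK_NAME": "net1", "NEW_NETWORK_UUID": "...",
  #      "NEW_NETWORK_TAGS": "", "NEW_NETWORK_SUBNET": "192.0.2.0/24",
  #      "NEW_NETWORK_GATEWAY": "192.0.2.1"}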
2371
2372 @classmethod
2373 def FromDict(cls, val):
2374 """Custom function for networks.
2375
2376 Remove deprecated network_type and family.
2377
2378 """
2379 if "network_type" in val:
2380 del val["network_type"]
2381 if "family" in val:
2382 del val["family"]
2383 obj = super(Network, cls).FromDict(val)
2384 return obj
2385
2386
2387 # need to inherit object in order to use super()
2388 class SerializableConfigParser(ConfigParser.SafeConfigParser, object):
  """Simple wrapper over ConfigParser that allows serialization.
2390
2391 This class is basically ConfigParser.SafeConfigParser with two
2392 additional methods that allow it to serialize/unserialize to/from a
2393 buffer.
2394
2395 """
2396 def Dumps(self):
2397 """Dump this instance and return the string representation."""
2398 buf = StringIO()
2399 self.write(buf)
2400 return buf.getvalue()
2401
2402 @classmethod
2403 def Loads(cls, data):
2404 """Load data from a string."""
2405 buf = StringIO(data)
2406 cfp = cls()
2407 cfp.readfp(buf)
2408 return cfp
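
  # Round-trip sketch (hypothetical section/option names):
  #   cfp = SerializableConfigParser()
  #   cfp.add_section("export")
  #   cfp.set("export", "version", "0")
  #   data = cfp.Dumps()
  #   SerializableConfigParser.Loads(data).get("export", "version") == "0"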
2409
2410 def get(self, section, option, **kwargs):
2411 value = None
2412 try:
2413 value = super(SerializableConfigParser, self).get(section, option,
2414 **kwargs)
2415 if value.lower() == constants.VALUE_NONE:
2416 value = None
2417 except ConfigParser.NoOptionError:
2418 r = re.compile(r"(disk|nic)\d+_name|nic\d+_(network|vlan)")
2419 match = r.match(option)
2420 if match:
2421 pass
2422 else:
2423 raise
2424
2425 return value
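
  # Behaviour note with a small sketch (hypothetical section/option names):
  # the literal string "none" is mapped to None, and missing per-disk or
  # per-NIC name/network/vlan options are tolerated instead of raising:
  #   parser.get("instance", "nic0_network") => None when the option is
  #   absent, while any other missing option still raises NoOptionError.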
2426
2427
2428 class LvmPvInfo(ConfigObject):
2429 """Information about an LVM physical volume (PV).
2430
2431 @type name: string
2432 @ivar name: name of the PV
2433 @type vg_name: string
2434 @ivar vg_name: name of the volume group containing the PV
2435 @type size: float
2436 @ivar size: size of the PV in MiB
2437 @type free: float
2438 @ivar free: free space in the PV, in MiB
2439 @type attributes: string
2440 @ivar attributes: PV attributes
2441 @type lv_list: list of strings
2442 @ivar lv_list: names of the LVs hosted on the PV
2443 """
2444 __slots__ = [
2445 "name",
2446 "vg_name",
2447 "size",
2448 "free",
2449 "attributes",
2450 "lv_list"
2451 ]
2452
2453 def IsEmpty(self):
2454 """Is this PV empty?
2455
2456 """
2457 return self.size <= (self.free + 1)
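
  # Example (hypothetical sizes in MiB): a PV of size 10240.0 with free
  # 10239.5 counts as empty, since up to 1 MiB of metadata overhead is
  # tolerated by the check above:
  #   LvmPvInfo(size=10240.0, free=10239.5).IsEmpty() => True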
2458
2459 def IsAllocatable(self):
2460 """Is this PV allocatable?
2461
2462 """
2463 return ("a" in self.attributes)