1 #
2 #
3
4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc.
5 # All rights reserved.
6 #
7 # Redistribution and use in source and binary forms, with or without
8 # modification, are permitted provided that the following conditions are
9 # met:
10 #
11 # 1. Redistributions of source code must retain the above copyright notice,
12 # this list of conditions and the following disclaimer.
13 #
14 # 2. Redistributions in binary form must reproduce the above copyright
15 # notice, this list of conditions and the following disclaimer in the
16 # documentation and/or other materials provided with the distribution.
17 #
18 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
19 # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
20 # TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
21 # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
22 # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
23 # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
24 # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
25 # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
26 # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
27 # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28 # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
30
31 """Transportable objects for Ganeti.
32
33 This module provides small, mostly data-only objects which are safe to
34 pass to and from external parties.
35
36 """
37
38 # pylint: disable=E0203,W0201,R0902
39
40 # E0203: Access to member %r before its definition, since we use
41 # objects.py which doesn't explicitly initialise its members
42
43 # W0201: Attribute '%s' defined outside __init__
44
45 # R0902: Allow instances of these objects to have more than 20 attributes
46
47 import ConfigParser
48 import re
49 import copy
50 import logging
51 import time
52 from cStringIO import StringIO
53
54 from ganeti import errors
55 from ganeti import constants
56 from ganeti import netutils
57 from ganeti import outils
58 from ganeti import utils
59 from ganeti import serializer
60
61 from socket import AF_INET
62
63
64 __all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
65 "OS", "Node", "NodeGroup", "Cluster", "FillDict", "Network",
66 "Filter"]
67
68 _TIMESTAMPS = ["ctime", "mtime"]
69 _UUID = ["uuid"]
70
71
72 def FillDict(defaults_dict, custom_dict, skip_keys=None):
73 """Basic function to apply settings on top a default dict.
74
75 @type defaults_dict: dict
76 @param defaults_dict: dictionary holding the default values
77 @type custom_dict: dict
78 @param custom_dict: dictionary holding the customized values
79 @type skip_keys: list
80 @param skip_keys: which keys not to fill
81 @rtype: dict
82 @return: dict with the 'full' values
83
84 """
85 ret_dict = copy.deepcopy(defaults_dict)
86 ret_dict.update(custom_dict)
87 if skip_keys:
88 for k in skip_keys:
89 if k in ret_dict:
90 del ret_dict[k]
91 return ret_dict
92
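# Illustrative sketch (editorial addition, not part of the original module):
# FillDict overlays the custom values on a deep copy of the defaults and can
# drop keys afterwards, e.g. one would expect roughly:
#   >>> FillDict({"a": 1, "b": 2}, {"b": 3})
#   {'a': 1, 'b': 3}
#   >>> FillDict({"a": 1, "b": 2}, {"b": 3}, skip_keys=["a"])
#   {'b': 3}
# (dict key order in the printed result may differ)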
93
94 def FillIPolicy(default_ipolicy, custom_ipolicy):
95 """Fills an instance policy with defaults.
96
97 """
98 assert frozenset(default_ipolicy.keys()) == constants.IPOLICY_ALL_KEYS
99 ret_dict = copy.deepcopy(custom_ipolicy)
100 for key in default_ipolicy:
101 if key not in ret_dict:
102 ret_dict[key] = copy.deepcopy(default_ipolicy[key])
103 elif key == constants.ISPECS_STD:
104 ret_dict[key] = FillDict(default_ipolicy[key], ret_dict[key])
105 return ret_dict
106
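# Illustrative sketch (editorial addition): keys missing from custom_ipolicy
# are deep-copied from the defaults, while the ISPECS_STD sub-dict is merged
# key by key via FillDict rather than replaced wholesale. For example, a call
# along the lines of
#   FillIPolicy(constants.IPOLICY_DEFAULTS,
#               {constants.ISPECS_STD: {constants.ISPEC_MEM_SIZE: 512}})
# would be expected to return a full policy whose std memory-size is 512 and
# whose remaining values all come from the defaults.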
107
108 def FillDiskParams(default_dparams, custom_dparams, skip_keys=None):
109 """Fills the disk parameter defaults.
110
111 @see: L{FillDict} for parameters and return value
112
113 """
114 return dict((dt, FillDict(default_dparams.get(dt, {}),
115 custom_dparams.get(dt, {}),
116 skip_keys=skip_keys))
117 for dt in constants.DISK_TEMPLATES)
118
119
120 def UpgradeGroupedParams(target, defaults):
121 """Update all groups for the target parameter.
122
123 @type target: dict of dicts
124 @param target: {group: {parameter: value}}
125 @type defaults: dict
126 @param defaults: default parameter values
127
128 """
129 if target is None:
130 target = {constants.PP_DEFAULT: defaults}
131 else:
132 for group in target:
133 target[group] = FillDict(defaults, target[group])
134 return target
135
136
137 def UpgradeBeParams(target):
138 """Update the be parameters dict to the new format.
139
140 @type target: dict
141 @param target: "be" parameters dict
142
143 """
144 if constants.BE_MEMORY in target:
145 memory = target[constants.BE_MEMORY]
146 target[constants.BE_MAXMEM] = memory
147 target[constants.BE_MINMEM] = memory
148 del target[constants.BE_MEMORY]
149
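# Illustrative sketch (editorial addition): the upgrade happens in place, e.g.
#   >>> be = {constants.BE_MEMORY: 256}
#   >>> UpgradeBeParams(be)
#   >>> be == {constants.BE_MAXMEM: 256, constants.BE_MINMEM: 256}
#   True
# Dicts that already use the new maxmem/minmem keys are left untouched.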
150
151 def UpgradeDiskParams(diskparams):
152 """Upgrade the disk parameters.
153
154 @type diskparams: dict
155 @param diskparams: disk parameters to upgrade
156 @rtype: dict
157 @return: the upgraded disk parameters dict
158
159 """
160 if not diskparams:
161 result = {}
162 else:
163 result = FillDiskParams(constants.DISK_DT_DEFAULTS, diskparams)
164
165 return result
166
167
168 def UpgradeNDParams(ndparams):
169 """Upgrade ndparams structure.
170
171 @type ndparams: dict
172 @param ndparams: node parameters to upgrade
173 @rtype: dict
174 @return: the upgraded node parameters dict
175
176 """
177 if ndparams is None:
178 ndparams = {}
179
180 if (constants.ND_OOB_PROGRAM in ndparams and
181 ndparams[constants.ND_OOB_PROGRAM] is None):
182 # will be reset by the line below
183 del ndparams[constants.ND_OOB_PROGRAM]
184 return FillDict(constants.NDC_DEFAULTS, ndparams)
185
186
187 def MakeEmptyIPolicy():
188 """Create empty IPolicy dictionary.
189
190 """
191 return {}
192
193
194 class ConfigObject(outils.ValidatedSlots):
195 """A generic config object.
196
197 It has the following properties:
198
199 - provides somewhat safe recursive unpickling and pickling for its classes
200 - unset attributes which are defined in slots are always returned
201 as None instead of raising an error
202
203 Classes derived from this must always declare __slots__ (we use many
204 config objects and the memory reduction is useful)
205
206 """
207 __slots__ = []
208
209 def __getattr__(self, name):
210 if name not in self.GetAllSlots():
211 raise AttributeError("Invalid object attribute %s.%s" %
212 (type(self).__name__, name))
213 return None
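# Illustrative sketch (editorial addition): for any subclass, slot attributes
# that were never assigned read as None instead of raising, while unknown
# names still fail, e.g.
#   >>> Node().powered is None          # unset slot -> None
#   True
#   >>> Node().no_such_slot             # not in __slots__ -> AttributeError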
214
215 def __setstate__(self, state):
216 slots = self.GetAllSlots()
217 for name in state:
218 if name in slots:
219 setattr(self, name, state[name])
220
221 def Validate(self):
222 """Validates the slots.
223
224 This method returns L{None} if the validation succeeds, or raises
225 an exception otherwise.
226
227 This method must be implemented by the child classes.
228
229 @rtype: NoneType
230 @return: L{None}, if the validation succeeds
231
232 @raise Exception: validation fails
233
234 """
235
236 def ToDict(self, _with_private=False):
237 """Convert to a dict holding only standard python types.
238
239 The generic routine just dumps all of this object's attributes in
240 a dict. It does not work if the class has children who are
241 ConfigObjects themselves (e.g. the nics list in an Instance), in
242 which case the object should subclass the function in order to
243 make sure all objects returned are only standard python types.
244
245 Private fields can be included or not with the _with_private switch.
246 The actual implementation of this switch is left for those subclasses
247 with private fields to implement.
248
249 @type _with_private: bool
250 @param _with_private: if True, the object will leak its private fields in
251 the dictionary representation. If False, the values
252 will be replaced with None.
253
254 """
255 result = {}
256 for name in self.GetAllSlots():
257 value = getattr(self, name, None)
258 if value is not None:
259 result[name] = value
260 return result
261
262 __getstate__ = ToDict
263
264 @classmethod
265 def FromDict(cls, val):
266 """Create an object from a dictionary.
267
268 This generic routine takes a dict, instantiates a new instance of
269 the given class, and sets attributes based on the dict content.
270
271 As for `ToDict`, this does not work if the class has children
272 who are ConfigObjects themselves (e.g. the nics list in an
273 Instance), in which case the object should subclass the function
274 and alter the objects.
275
276 """
277 if not isinstance(val, dict):
278 raise errors.ConfigurationError("Invalid object passed to FromDict:"
279 " expected dict, got %s" % type(val))
280 val_str = dict([(str(k), v) for k, v in val.iteritems()])
281 obj = cls(**val_str) # pylint: disable=W0142
282 return obj
283
284 def Copy(self):
285 """Makes a deep copy of the current object and its children.
286
287 """
288 dict_form = self.ToDict()
289 clone_obj = self.__class__.FromDict(dict_form)
290 return clone_obj
291
292 def __repr__(self):
293 """Implement __repr__ for ConfigObjects."""
294 return repr(self.ToDict())
295
296 def __eq__(self, other):
297 """Implement __eq__ for ConfigObjects."""
298 return isinstance(other, self.__class__) and self.ToDict() == other.ToDict()
299
300 def UpgradeConfig(self):
301 """Fill defaults for missing configuration values.
302
303 This method will be called at configuration load time, and its
304 implementation will be object dependent.
305
306 """
307 pass
308
309
310 class TaggableObject(ConfigObject):
311 """An generic class supporting tags.
312
313 """
314 __slots__ = ["tags"]
315 VALID_TAG_RE = re.compile(r"^[\w.+*/:@-]+$")
316
317 @classmethod
318 def ValidateTag(cls, tag):
319 """Check if a tag is valid.
320
321 If the tag is invalid, an errors.TagError will be raised. The
322 function has no return value.
323
324 """
325 if not isinstance(tag, basestring):
326 raise errors.TagError("Invalid tag type (not a string)")
327 if len(tag) > constants.MAX_TAG_LEN:
328 raise errors.TagError("Tag too long (>%d characters)" %
329 constants.MAX_TAG_LEN)
330 if not tag:
331 raise errors.TagError("Tags cannot be empty")
332 if not cls.VALID_TAG_RE.match(tag):
333 raise errors.TagError("Tag contains invalid characters")
334
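# Illustrative sketch (editorial addition): tags must be non-empty strings of
# at most constants.MAX_TAG_LEN characters matching VALID_TAG_RE, e.g.
#   >>> TaggableObject.ValidateTag("env:prod")   # valid, returns None
#   >>> TaggableObject.ValidateTag("bad tag")    # space -> errors.TagError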
335 def GetTags(self):
336 """Return the tags list.
337
338 """
339 tags = getattr(self, "tags", None)
340 if tags is None:
341 tags = self.tags = set()
342 return tags
343
344 def AddTag(self, tag):
345 """Add a new tag.
346
347 """
348 self.ValidateTag(tag)
349 tags = self.GetTags()
350 if len(tags) >= constants.MAX_TAGS_PER_OBJ:
351 raise errors.TagError("Too many tags")
352 self.GetTags().add(tag)
353
354 def RemoveTag(self, tag):
355 """Remove a tag.
356
357 """
358 self.ValidateTag(tag)
359 tags = self.GetTags()
360 try:
361 tags.remove(tag)
362 except KeyError:
363 raise errors.TagError("Tag not found")
364
365 def ToDict(self, _with_private=False):
366 """Taggable-object-specific conversion to standard python types.
367
368 This replaces the tags set with a list.
369
370 """
371 bo = super(TaggableObject, self).ToDict(_with_private=_with_private)
372
373 tags = bo.get("tags", None)
374 if isinstance(tags, set):
375 bo["tags"] = list(tags)
376 return bo
377
378 @classmethod
379 def FromDict(cls, val):
380 """Custom function for instances.
381
382 """
383 obj = super(TaggableObject, cls).FromDict(val)
384 if hasattr(obj, "tags") and isinstance(obj.tags, list):
385 obj.tags = set(obj.tags)
386 return obj
387
388
389 class MasterNetworkParameters(ConfigObject):
390 """Network configuration parameters for the master
391
392 @ivar uuid: master node's UUID
393 @ivar ip: master IP
394 @ivar netmask: master netmask
395 @ivar netdev: master network device
396 @ivar ip_family: master IP family
397
398 """
399 __slots__ = [
400 "uuid",
401 "ip",
402 "netmask",
403 "netdev",
404 "ip_family",
405 ]
406
407
408 class ConfigData(ConfigObject):
409 """Top-level config object."""
410 __slots__ = [
411 "version",
412 "cluster",
413 "nodes",
414 "nodegroups",
415 "instances",
416 "networks",
417 "disks",
418 "filters",
419 "serial_no",
420 ] + _TIMESTAMPS
421
422 def ToDict(self, _with_private=False):
423 """Custom function for top-level config data.
424
425 This just replaces the list of nodes, instances, nodegroups,
426 networks, disks and the cluster with standard python types.
427
428 """
429 mydict = super(ConfigData, self).ToDict(_with_private=_with_private)
430 mydict["cluster"] = mydict["cluster"].ToDict()
431 for key in ("nodes", "instances", "nodegroups", "networks", "disks",
432 "filters"):
433 mydict[key] = outils.ContainerToDicts(mydict[key])
434
435 return mydict
436
437 @classmethod
438 def FromDict(cls, val):
439 """Custom function for top-level config data
440
441 """
442 obj = super(ConfigData, cls).FromDict(val)
443 obj.cluster = Cluster.FromDict(obj.cluster)
444 obj.nodes = outils.ContainerFromDicts(obj.nodes, dict, Node)
445 obj.instances = \
446 outils.ContainerFromDicts(obj.instances, dict, Instance)
447 obj.nodegroups = \
448 outils.ContainerFromDicts(obj.nodegroups, dict, NodeGroup)
449 obj.networks = outils.ContainerFromDicts(obj.networks, dict, Network)
450 obj.disks = outils.ContainerFromDicts(obj.disks, dict, Disk)
451 obj.filters = outils.ContainerFromDicts(obj.filters, dict, Filter)
452 return obj
453
454 def DisksOfType(self, dev_type):
455 """Check if in there is at disk of the given type in the configuration.
456
457 @type dev_type: L{constants.DTS_BLOCK}
458 @param dev_type: the type to look for
459 @rtype: list of disks
460 @return: all disks of the dev_type
461
462 """
463
464 return [disk for disk in self.disks.values()
465 if disk.IsBasedOnDiskType(dev_type)]
466
467 def UpgradeConfig(self):
468 """Fill defaults for missing configuration values.
469
470 """
471 self.cluster.UpgradeConfig()
472 for node in self.nodes.values():
473 node.UpgradeConfig()
474 for instance in self.instances.values():
475 instance.UpgradeConfig()
476 self._UpgradeEnabledDiskTemplates()
477 if self.nodegroups is None:
478 self.nodegroups = {}
479 for nodegroup in self.nodegroups.values():
480 nodegroup.UpgradeConfig()
481 InstancePolicy.UpgradeDiskTemplates(
482 nodegroup.ipolicy, self.cluster.enabled_disk_templates)
483 if self.cluster.drbd_usermode_helper is None:
484 if self.cluster.IsDiskTemplateEnabled(constants.DT_DRBD8):
485 self.cluster.drbd_usermode_helper = constants.DEFAULT_DRBD_HELPER
486 if self.networks is None:
487 self.networks = {}
488 for network in self.networks.values():
489 network.UpgradeConfig()
490 for disk in self.disks.values():
491 disk.UpgradeConfig()
492 if self.filters is None:
493 self.filters = {}
494
495 def _UpgradeEnabledDiskTemplates(self):
496 """Upgrade the cluster's enabled disk templates by inspecting the currently
497 enabled and/or used disk templates.
498
499 """
500 if not self.cluster.enabled_disk_templates:
501 template_set = \
502 set([d.dev_type for d in self.disks.values()])
503 if any(not inst.disks for inst in self.instances.values()):
504 template_set.add(constants.DT_DISKLESS)
505 # Add drbd and plain, if lvm is enabled (by specifying a volume group)
506 if self.cluster.volume_group_name:
507 template_set.add(constants.DT_DRBD8)
508 template_set.add(constants.DT_PLAIN)
509 # Set enabled_disk_templates to the inferred disk templates. Order them
510 # according to a preference list that is based on Ganeti's history of
511 # supported disk templates.
512 self.cluster.enabled_disk_templates = []
513 for preferred_template in constants.DISK_TEMPLATE_PREFERENCE:
514 if preferred_template in template_set:
515 self.cluster.enabled_disk_templates.append(preferred_template)
516 template_set.remove(preferred_template)
517 self.cluster.enabled_disk_templates.extend(list(template_set))
518 InstancePolicy.UpgradeDiskTemplates(
519 self.cluster.ipolicy, self.cluster.enabled_disk_templates)
520
521
522 class NIC(ConfigObject):
523 """Config object representing a network card."""
524 __slots__ = ["name", "mac", "ip", "network",
525 "nicparams", "netinfo", "pci"] + _UUID
526
527 @classmethod
528 def CheckParameterSyntax(cls, nicparams):
529 """Check the given parameters for validity.
530
531 @type nicparams: dict
532 @param nicparams: dictionary with parameter names/value
533 @raise errors.ConfigurationError: when a parameter is not valid
534
535 """
536 mode = nicparams[constants.NIC_MODE]
537 if (mode not in constants.NIC_VALID_MODES and
538 mode != constants.VALUE_AUTO):
539 raise errors.ConfigurationError("Invalid NIC mode '%s'" % mode)
540
541 if (mode == constants.NIC_MODE_BRIDGED and
542 not nicparams[constants.NIC_LINK]):
543 raise errors.ConfigurationError("Missing bridged NIC link")
544
545
546 class Filter(ConfigObject):
547 """Config object representing a filter rule."""
548 __slots__ = ["watermark", "priority",
549 "predicates", "action", "reason_trail"] + _UUID
550
551
552 class Disk(ConfigObject):
553 """Config object representing a block device."""
554 __slots__ = [
555 "forthcoming",
556 "name",
557 "dev_type",
558 "logical_id",
559 "children",
560 "nodes",
561 "iv_name",
562 "size",
563 "mode",
564 "params",
565 "spindles",
566 "pci",
567 "serial_no",
568 # dynamic_params is special. It depends on the node this instance
569 # is sent to, and should not be persisted.
570 "dynamic_params"
571 ] + _UUID + _TIMESTAMPS
572
573 def _ComputeAllNodes(self):
574 """Compute the list of all nodes covered by a device and its children."""
575 def _Helper(nodes, device):
576 """Recursively compute nodes given a top device."""
577 if device.dev_type in constants.DTS_DRBD:
578 nodes.extend(device.logical_id[:2])
579 if device.children:
580 for child in device.children:
581 _Helper(nodes, child)
582
583 all_nodes = list()
584 _Helper(all_nodes, self)
585 return tuple(set(all_nodes))
586
587 all_nodes = property(_ComputeAllNodes, None, None,
588 "List of names of all the nodes of a disk")
589
590 def CreateOnSecondary(self):
591 """Test if this device needs to be created on a secondary node."""
592 return self.dev_type in (constants.DT_DRBD8, constants.DT_PLAIN)
593
594 def AssembleOnSecondary(self):
595 """Test if this device needs to be assembled on a secondary node."""
596 return self.dev_type in (constants.DT_DRBD8, constants.DT_PLAIN)
597
598 def OpenOnSecondary(self):
599 """Test if this device needs to be opened on a secondary node."""
600 return self.dev_type in (constants.DT_PLAIN,)
601
602 def SupportsSnapshots(self):
603 """Test if this device supports snapshots."""
604 return self.dev_type in constants.DTS_SNAPSHOT_CAPABLE
605
606 def StaticDevPath(self):
607 """Return the device path if this device type has a static one.
608
609 Some devices (LVM for example) live always at the same /dev/ path,
610 irrespective of their status. For such devices, we return this
611 path, for others we return None.
612
613 @warning: The path returned is not a normalized pathname; callers
614 should check that it is a valid path.
615
616 """
617 if self.dev_type == constants.DT_PLAIN:
618 return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
619 elif self.dev_type == constants.DT_BLOCK:
620 return self.logical_id[1]
621 elif self.dev_type == constants.DT_RBD:
622 return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
623 return None
624
625 def ChildrenNeeded(self):
626 """Compute the needed number of children for activation.
627
628 This method will return either -1 (all children) or a non-negative
629 number denoting the minimum number of children needed for
630 activation (only mirrored devices will usually return >=0).
631
632 Currently, only DRBD8 supports diskless activation (therefore we
633 return 0), for all other we keep the previous semantics and return
634 -1.
635
636 """
637 if self.dev_type == constants.DT_DRBD8:
638 return 0
639 return -1
640
641 def IsBasedOnDiskType(self, dev_type):
642 """Check if the disk or its children are based on the given type.
643
644 @type dev_type: L{constants.DTS_BLOCK}
645 @param dev_type: the type to look for
646 @rtype: boolean
647 @return: boolean indicating if a device of the given type was found or not
648
649 """
650 if self.children:
651 for child in self.children:
652 if child.IsBasedOnDiskType(dev_type):
653 return True
654 return self.dev_type == dev_type
655
656 def GetNodes(self, node_uuid):
657 """This function returns the nodes this device lives on.
658
659 Given the node on which the parent of the device lives (or, in
660 case of a top-level device, the primary node of the device's
661 instance), this function will return a list of nodes on which this
662 device needs to (or can) be assembled.
663
664 """
665 if self.dev_type in [constants.DT_PLAIN, constants.DT_FILE,
666 constants.DT_BLOCK, constants.DT_RBD,
667 constants.DT_EXT, constants.DT_SHARED_FILE,
668 constants.DT_GLUSTER]:
669 result = [node_uuid]
670 elif self.dev_type in constants.DTS_DRBD:
671 result = [self.logical_id[0], self.logical_id[1]]
672 if node_uuid not in result:
673 raise errors.ConfigurationError("DRBD device passed unknown node")
674 else:
675 raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
676 return result
677
678 def ComputeNodeTree(self, parent_node_uuid):
679 """Compute the node/disk tree for this disk and its children.
680
681 This method, given the node on which the parent disk lives, will
682 return the list of all (node UUID, disk) pairs which describe the disk
683 tree in the most compact way. For example, a drbd/lvm stack
684 will be returned as (primary_node, drbd) and (secondary_node, drbd)
685 which represents all the top-level devices on the nodes.
686
687 """
688 my_nodes = self.GetNodes(parent_node_uuid)
689 result = [(node, self) for node in my_nodes]
690 if not self.children:
691 # leaf device
692 return result
693 for node in my_nodes:
694 for child in self.children:
695 child_result = child.ComputeNodeTree(node)
696 if len(child_result) == 1:
697 # child (and all its descendants) is simple, doesn't split
698 # over multiple hosts, so we don't need to describe it, our
699 # own entry for this node describes it completely
700 continue
701 else:
702 # check if child nodes differ from my nodes; note that
703 # subdisk can differ from the child itself, and be instead
704 # one of its descendants
705 for subnode, subdisk in child_result:
706 if subnode not in my_nodes:
707 result.append((subnode, subdisk))
708 # otherwise child is under our own node, so we ignore this
709 # entry (but probably the other results in the list will
710 # be different)
711 return result
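# Illustrative sketch (editorial addition): for a DRBD8 disk between nodes A
# and B whose children are plain LVs on those same nodes, ComputeNodeTree(A)
# collapses the single-node child results into the parent's own entries and
# yields just [(A, drbd_disk), (B, drbd_disk)]; only a child that spans a node
# outside my_nodes would contribute an extra (node, subdisk) pair.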
712
713 def ComputeGrowth(self, amount):
714 """Compute the per-VG growth requirements.
715
716 This only works for VG-based disks.
717
718 @type amount: integer
719 @param amount: the desired increase in (user-visible) disk space
720 @rtype: dict
721 @return: a dictionary of volume-groups and the required size
722
723 """
724 if self.dev_type == constants.DT_PLAIN:
725 return {self.logical_id[0]: amount}
726 elif self.dev_type == constants.DT_DRBD8:
727 if self.children:
728 return self.children[0].ComputeGrowth(amount)
729 else:
730 return {}
731 else:
732 # Other disk types do not require VG space
733 return {}
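# Illustrative sketch (editorial addition): a plain LV in a volume group named
# "xenvg" (example value) grown by 1024 MiB reports {"xenvg": 1024}; a DRBD8
# disk delegates to its data LV child, and all other disk types report {}
# since they consume no VG space.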
734
735 def RecordGrow(self, amount):
736 """Update the size of this disk after growth.
737
738 This method recurses over the disk's children and updates their
739 size correspondingly. The method needs to be kept in sync with the
740 actual algorithms from bdev.
741
742 """
743 if self.dev_type in (constants.DT_PLAIN, constants.DT_FILE,
744 constants.DT_RBD, constants.DT_EXT,
745 constants.DT_SHARED_FILE, constants.DT_GLUSTER):
746 self.size += amount
747 elif self.dev_type == constants.DT_DRBD8:
748 if self.children:
749 self.children[0].RecordGrow(amount)
750 self.size += amount
751 else:
752 raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
753 " disk type %s" % self.dev_type)
754
755 def Update(self, size=None, mode=None, spindles=None):
756 """Apply changes to size, spindles and mode.
757
758 """
759 if self.dev_type == constants.DT_DRBD8:
760 if self.children:
761 self.children[0].Update(size=size, mode=mode)
762 else:
763 assert not self.children
764
765 if size is not None:
766 self.size = size
767 if mode is not None:
768 self.mode = mode
769 if spindles is not None:
770 self.spindles = spindles
771
772 def UnsetSize(self):
773 """Sets recursively the size to zero for the disk and its children.
774
775 """
776 if self.children:
777 for child in self.children:
778 child.UnsetSize()
779 self.size = 0
780
781 def UpdateDynamicDiskParams(self, target_node_uuid, nodes_ip):
782 """Updates the dynamic disk params for the given node.
783
784 This is mainly used for drbd, which needs ip/port configuration.
785
786 Arguments:
787 - target_node_uuid: the node UUID we wish to configure for
788 - nodes_ip: a mapping of node UUID to ip
789
790 The target_node must exist in nodes_ip, and should be one of the
791 nodes in the logical ID if this device is a DRBD device.
792
793 """
794 if self.children:
795 for child in self.children:
796 child.UpdateDynamicDiskParams(target_node_uuid, nodes_ip)
797
798 dyn_disk_params = {}
799 if self.logical_id is not None and self.dev_type in constants.DTS_DRBD:
800 pnode_uuid, snode_uuid, _, pminor, sminor, _ = self.logical_id
801 if target_node_uuid not in (pnode_uuid, snode_uuid):
802 # disk object is being sent to neither the primary nor the secondary
803 # node. reset the dynamic parameters, the target node is not
804 # supposed to use them.
805 self.dynamic_params = dyn_disk_params
806 return
807
808 pnode_ip = nodes_ip.get(pnode_uuid, None)
809 snode_ip = nodes_ip.get(snode_uuid, None)
810 if pnode_ip is None or snode_ip is None:
811 raise errors.ConfigurationError("Can't find primary or secondary node"
812 " for %s" % str(self))
813 if pnode_uuid == target_node_uuid:
814 dyn_disk_params[constants.DDP_LOCAL_IP] = pnode_ip
815 dyn_disk_params[constants.DDP_REMOTE_IP] = snode_ip
816 dyn_disk_params[constants.DDP_LOCAL_MINOR] = pminor
817 dyn_disk_params[constants.DDP_REMOTE_MINOR] = sminor
818 else: # it must be secondary, we tested above
819 dyn_disk_params[constants.DDP_LOCAL_IP] = snode_ip
820 dyn_disk_params[constants.DDP_REMOTE_IP] = pnode_ip
821 dyn_disk_params[constants.DDP_LOCAL_MINOR] = sminor
822 dyn_disk_params[constants.DDP_REMOTE_MINOR] = pminor
823
824 self.dynamic_params = dyn_disk_params
825
826 # pylint: disable=W0221
827 def ToDict(self, include_dynamic_params=False,
828 _with_private=False):
829 """Disk-specific conversion to standard python types.
830
831 This replaces the children lists of objects with lists of
832 standard python types.
833
834 """
835 bo = super(Disk, self).ToDict(_with_private=_with_private)
836 if not include_dynamic_params and "dynamic_params" in bo:
837 del bo["dynamic_params"]
838
839 if _with_private and "logical_id" in bo:
840 mutable_id = list(bo["logical_id"])
841 mutable_id[5] = mutable_id[5].Get()
842 bo["logical_id"] = tuple(mutable_id)
843
844 for attr in ("children",):
845 alist = bo.get(attr, None)
846 if alist:
847 bo[attr] = outils.ContainerToDicts(alist)
848 return bo
849
850 @classmethod
851 def FromDict(cls, val):
852 """Custom function for Disks
853
854 """
855 obj = super(Disk, cls).FromDict(val)
856 if obj.children:
857 obj.children = outils.ContainerFromDicts(obj.children, list, Disk)
858 if obj.logical_id and isinstance(obj.logical_id, list):
859 obj.logical_id = tuple(obj.logical_id)
860 if obj.dev_type in constants.DTS_DRBD:
861 # we need a tuple of length six here
862 if len(obj.logical_id) < 6:
863 obj.logical_id += (None,) * (6 - len(obj.logical_id))
864 # If we do have a tuple of length 6, make the last entry (secret key)
865 # private
866 elif (len(obj.logical_id) == 6 and
867 not isinstance(obj.logical_id[-1], serializer.Private)):
868 obj.logical_id = obj.logical_id[:-1] + \
869 (serializer.Private(obj.logical_id[-1]),)
870 return obj
871
872 def __str__(self):
873 """Custom str() formatter for disks.
874
875 """
876 if self.dev_type == constants.DT_PLAIN:
877 val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
878 elif self.dev_type in constants.DTS_DRBD:
879 node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
880 val = "<DRBD8("
881
882 val += ("hosts=%s/%d-%s/%d, port=%s, " %
883 (node_a, minor_a, node_b, minor_b, port))
884 if self.children and self.children.count(None) == 0:
885 val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
886 else:
887 val += "no local storage"
888 else:
889 val = ("<Disk(type=%s, logical_id=%s, children=%s" %
890 (self.dev_type, self.logical_id, self.children))
891 if self.iv_name is None:
892 val += ", not visible"
893 else:
894 val += ", visible as /dev/%s" % self.iv_name
895 if self.spindles is not None:
896 val += ", spindles=%s" % self.spindles
897 if isinstance(self.size, int):
898 val += ", size=%dm)>" % self.size
899 else:
900 val += ", size='%s')>" % (self.size,)
901 return val
902
903 def Verify(self):
904 """Checks that this disk is correctly configured.
905
906 """
907 all_errors = []
908 if self.mode not in constants.DISK_ACCESS_SET:
909 all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
910 return all_errors
911
912 def UpgradeConfig(self):
913 """Fill defaults for missing configuration values.
914
915 """
916 if self.children:
917 for child in self.children:
918 child.UpgradeConfig()
919
920 # FIXME: Make this configurable in Ganeti 2.7
921 # Params should be an empty dict that gets filled any time needed
922 # In case of ext template we allow arbitrary params that should not
923 # be overridden during a config reload/upgrade.
924 if not self.params or not isinstance(self.params, dict):
925 self.params = {}
926
927 # add here config upgrade for this disk
928 if self.serial_no is None:
929 self.serial_no = 1
930 if self.mtime is None:
931 self.mtime = time.time()
932 if self.ctime is None:
933 self.ctime = time.time()
934
935 # map of legacy device types (mapping differing LD constants to new
936 # DT constants)
937 LEG_DEV_TYPE_MAP = {"lvm": constants.DT_PLAIN, "drbd8": constants.DT_DRBD8}
938 if self.dev_type in LEG_DEV_TYPE_MAP:
939 self.dev_type = LEG_DEV_TYPE_MAP[self.dev_type]
940
941 @staticmethod
942 def ComputeLDParams(disk_template, disk_params):
943 """Computes Logical Disk parameters from Disk Template parameters.
944
945 @type disk_template: string
946 @param disk_template: disk template, one of L{constants.DISK_TEMPLATES}
947 @type disk_params: dict
948 @param disk_params: disk template parameters;
949 dict(template_name -> parameters)
950 @rtype: list(dict)
951 @return: a list of dicts, one for each node of the disk hierarchy. Each dict
952 contains the LD parameters of the node. The tree is flattened in-order.
953
954 """
955 if disk_template not in constants.DISK_TEMPLATES:
956 raise errors.ProgrammerError("Unknown disk template %s" % disk_template)
957
958 assert disk_template in disk_params
959
960 result = list()
961 dt_params = disk_params[disk_template]
962
963 if disk_template == constants.DT_DRBD8:
964 result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.DT_DRBD8], {
965 constants.LDP_RESYNC_RATE: dt_params[constants.DRBD_RESYNC_RATE],
966 constants.LDP_BARRIERS: dt_params[constants.DRBD_DISK_BARRIERS],
967 constants.LDP_NO_META_FLUSH: dt_params[constants.DRBD_META_BARRIERS],
968 constants.LDP_DEFAULT_METAVG: dt_params[constants.DRBD_DEFAULT_METAVG],
969 constants.LDP_DISK_CUSTOM: dt_params[constants.DRBD_DISK_CUSTOM],
970 constants.LDP_NET_CUSTOM: dt_params[constants.DRBD_NET_CUSTOM],
971 constants.LDP_PROTOCOL: dt_params[constants.DRBD_PROTOCOL],
972 constants.LDP_DYNAMIC_RESYNC: dt_params[constants.DRBD_DYNAMIC_RESYNC],
973 constants.LDP_PLAN_AHEAD: dt_params[constants.DRBD_PLAN_AHEAD],
974 constants.LDP_FILL_TARGET: dt_params[constants.DRBD_FILL_TARGET],
975 constants.LDP_DELAY_TARGET: dt_params[constants.DRBD_DELAY_TARGET],
976 constants.LDP_MAX_RATE: dt_params[constants.DRBD_MAX_RATE],
977 constants.LDP_MIN_RATE: dt_params[constants.DRBD_MIN_RATE],
978 }))
979
980 # data LV
981 result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.DT_PLAIN], {
982 constants.LDP_STRIPES: dt_params[constants.DRBD_DATA_STRIPES],
983 }))
984
985 # metadata LV
986 result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.DT_PLAIN], {
987 constants.LDP_STRIPES: dt_params[constants.DRBD_META_STRIPES],
988 }))
989
990 else:
991 defaults = constants.DISK_LD_DEFAULTS[disk_template]
992 values = {}
993 for field in defaults:
994 values[field] = dt_params[field]
995 result.append(FillDict(defaults, values))
996
997 return result
998
999
1000 class InstancePolicy(ConfigObject):
1001 """Config object representing instance policy limits dictionary.
1002
1003 Note that this object is not actually used in the config, it's just
1004 used as a placeholder for a few functions.
1005
1006 """
1007 @classmethod
1008 def UpgradeDiskTemplates(cls, ipolicy, enabled_disk_templates):
1009 """Upgrades the ipolicy configuration."""
1010 if constants.IPOLICY_DTS in ipolicy:
1011 if not set(ipolicy[constants.IPOLICY_DTS]).issubset(
1012 set(enabled_disk_templates)):
1013 ipolicy[constants.IPOLICY_DTS] = list(
1014 set(ipolicy[constants.IPOLICY_DTS]) & set(enabled_disk_templates))
1015
1016 @classmethod
1017 def CheckParameterSyntax(cls, ipolicy, check_std):
1018 """ Check the instance policy for validity.
1019
1020 @type ipolicy: dict
1021 @param ipolicy: dictionary with min/max/std specs and policies
1022 @type check_std: bool
1023 @param check_std: Whether to check std value or just assume compliance
1024 @raise errors.ConfigurationError: when the policy is not legal
1025
1026 """
1027 InstancePolicy.CheckISpecSyntax(ipolicy, check_std)
1028 if constants.IPOLICY_DTS in ipolicy:
1029 InstancePolicy.CheckDiskTemplates(ipolicy[constants.IPOLICY_DTS])
1030 for key in constants.IPOLICY_PARAMETERS:
1031 if key in ipolicy:
1032 InstancePolicy.CheckParameter(key, ipolicy[key])
1033 wrong_keys = frozenset(ipolicy.keys()) - constants.IPOLICY_ALL_KEYS
1034 if wrong_keys:
1035 raise errors.ConfigurationError("Invalid keys in ipolicy: %s" %
1036 utils.CommaJoin(wrong_keys))
1037
1038 @classmethod
1039 def _CheckIncompleteSpec(cls, spec, keyname):
1040 missing_params = constants.ISPECS_PARAMETERS - frozenset(spec.keys())
1041 if missing_params:
1042 msg = ("Missing instance specs parameters for %s: %s" %
1043 (keyname, utils.CommaJoin(missing_params)))
1044 raise errors.ConfigurationError(msg)
1045
1046 @classmethod
1047 def CheckISpecSyntax(cls, ipolicy, check_std):
1048 """Check the instance policy specs for validity.
1049
1050 @type ipolicy: dict
1051 @param ipolicy: dictionary with min/max/std specs
1052 @type check_std: bool
1053 @param check_std: Whether to check std value or just assume compliance
1054 @raise errors.ConfigurationError: when specs are not valid
1055
1056 """
1057 if constants.ISPECS_MINMAX not in ipolicy:
1058 # Nothing to check
1059 return
1060
1061 if check_std and constants.ISPECS_STD not in ipolicy:
1062 msg = "Missing key in ipolicy: %s" % constants.ISPECS_STD
1063 raise errors.ConfigurationError(msg)
1064 stdspec = ipolicy.get(constants.ISPECS_STD)
1065 if check_std:
1066 InstancePolicy._CheckIncompleteSpec(stdspec, constants.ISPECS_STD)
1067
1068 if not ipolicy[constants.ISPECS_MINMAX]:
1069 raise errors.ConfigurationError("Empty minmax specifications")
1070 std_is_good = False
1071 for minmaxspecs in ipolicy[constants.ISPECS_MINMAX]:
1072 missing = constants.ISPECS_MINMAX_KEYS - frozenset(minmaxspecs.keys())
1073 if missing:
1074 msg = "Missing instance specification: %s" % utils.CommaJoin(missing)
1075 raise errors.ConfigurationError(msg)
1076 for (key, spec) in minmaxspecs.items():
1077 InstancePolicy._CheckIncompleteSpec(spec, key)
1078
1079 spec_std_ok = True
1080 for param in constants.ISPECS_PARAMETERS:
1081 par_std_ok = InstancePolicy._CheckISpecParamSyntax(minmaxspecs, stdspec,
1082 param, check_std)
1083 spec_std_ok = spec_std_ok and par_std_ok
1084 std_is_good = std_is_good or spec_std_ok
1085 if not std_is_good:
1086 raise errors.ConfigurationError("Invalid std specifications")
1087
1088 @classmethod
1089 def _CheckISpecParamSyntax(cls, minmaxspecs, stdspec, name, check_std):
1090 """Check the instance policy specs for validity on a given key.
1091
1092 We check if the instance specs make sense for a given key, that is
1093 if minmaxspecs[min][name] <= stdspec[name] <= minmaxspecs[max][name].
1094
1095 @type minmaxspecs: dict
1096 @param minmaxspecs: dictionary with min and max instance spec
1097 @type stdspec: dict
1098 @param stdspec: dictionary with standard instance spec
1099 @type name: string
1100 @param name: what are the limits for
1101 @type check_std: bool
1102 @param check_std: Whether to check std value or just assume compliance
1103 @rtype: bool
1104 @return: C{True} when specs are valid, C{False} when standard spec for the
1105 given name is not valid
1106 @raise errors.ConfigurationError: when min/max specs for the given name
1107 are not valid
1108
1109 """
1110 minspec = minmaxspecs[constants.ISPECS_MIN]
1111 maxspec = minmaxspecs[constants.ISPECS_MAX]
1112 min_v = minspec[name]
1113 max_v = maxspec[name]
1114
1115 if min_v > max_v:
1116 err = ("Invalid specification of min/max values for %s: %s/%s" %
1117 (name, min_v, max_v))
1118 raise errors.ConfigurationError(err)
1119 elif check_std:
1120 std_v = stdspec.get(name, min_v)
1121 return std_v >= min_v and std_v <= max_v
1122 else:
1123 return True
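# Illustrative sketch (editorial addition): with min=128 and max=1024 for a
# parameter, a std value of 512 passes (True is returned), 2048 fails (False
# is returned when check_std is set), and min=1024/max=128 raises
# ConfigurationError regardless of the std value.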
1124
1125 @classmethod
1126 def CheckDiskTemplates(cls, disk_templates):
1127 """Checks the disk templates for validity.
1128
1129 """
1130 if not disk_templates:
1131 raise errors.ConfigurationError("Instance policy must contain" +
1132 " at least one disk template")
1133 wrong = frozenset(disk_templates).difference(constants.DISK_TEMPLATES)
1134 if wrong:
1135 raise errors.ConfigurationError("Invalid disk template(s) %s" %
1136 utils.CommaJoin(wrong))
1137
1138 @classmethod
1139 def CheckParameter(cls, key, value):
1140 """Checks a parameter.
1141
1142 Currently we expect all parameters to be float values.
1143
1144 """
1145 try:
1146 float(value)
1147 except (TypeError, ValueError), err:
1148 raise errors.ConfigurationError("Invalid value for key" " '%s':"
1149 " '%s', error: %s" % (key, value, err))
1150
1151
1152 def GetOSImage(osparams):
1153 """Gets the OS image value from the OS parameters.
1154
1155 @type osparams: L{dict} or NoneType
1156 @param osparams: OS parameters or None
1157
1158 @rtype: string or NoneType
1159 @return:
1160 value of OS image contained in OS parameters, or None if the OS
1161 parameters are None or the OS parameters do not contain an OS
1162 image
1163
1164 """
1165 if osparams is None:
1166 return None
1167 else:
1168 return osparams.get("os-image", None)
1169
1170
1171 def PutOSImage(osparams, os_image):
1172 """Update OS image value in the OS parameters
1173
1174 @type osparams: L{dict}
1175 @param osparams: OS parameters
1176
1177 @type os_image: string
1178 @param os_image: OS image
1179
1180 @rtype: NoneType
1181 @return: None
1182
1183 """
1184 osparams["os-image"] = os_image
1185
1186
1187 class Instance(TaggableObject):
1188 """Config object representing an instance."""
1189 __slots__ = [
1190 "forthcoming",
1191 "name",
1192 "primary_node",
1193 "secondary_nodes",
1194 "os",
1195 "hypervisor",
1196 "hvparams",
1197 "beparams",
1198 "osparams",
1199 "osparams_private",
1200 "admin_state",
1201 "admin_state_source",
1202 "nics",
1203 "disks",
1204 "disks_info",
1205 "disk_template",
1206 "disks_active",
1207 "network_port",
1208 "serial_no",
1209 ] + _TIMESTAMPS + _UUID
1210
1211 def FindDisk(self, idx):
1212 """Find a disk given having a specified index.
1213
1214 This is just a wrapper that does validation of the index.
1215
1216 @type idx: int
1217 @param idx: the disk index
1218 @rtype: string
1219 @return: the corresponding disk's uuid
1220 @raise errors.OpPrereqError: when the given index is not valid
1221
1222 """
1223 try:
1224 idx = int(idx)
1225 return self.disks[idx]
1226 except (TypeError, ValueError), err:
1227 raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err),
1228 errors.ECODE_INVAL)
1229 except IndexError:
1230 raise errors.OpPrereqError("Invalid disk index: %d (instace has disks"
1231 " 0 to %d" % (idx, len(self.disks) - 1),
1232 errors.ECODE_INVAL)
1233
1234 def ToDict(self, _with_private=False):
1235 """Instance-specific conversion to standard python types.
1236
1237 This replaces the children lists of objects with lists of standard
1238 python types.
1239
1240 """
1241 bo = super(Instance, self).ToDict(_with_private=_with_private)
1242
1243 if _with_private:
1244 bo["osparams_private"] = self.osparams_private.Unprivate()
1245
1246 for attr in "nics", :
1247 alist = bo.get(attr, None)
1248 if alist:
1249 nlist = outils.ContainerToDicts(alist)
1250 else:
1251 nlist = []
1252 bo[attr] = nlist
1253
1254 if 'disk_template' in bo:
1255 del bo['disk_template']
1256
1257 return bo
1258
1259 @classmethod
1260 def FromDict(cls, val):
1261 """Custom function for instances.
1262
1263 """
1264 if "admin_state" not in val:
1265 if val.get("admin_up", False):
1266 val["admin_state"] = constants.ADMINST_UP
1267 else:
1268 val["admin_state"] = constants.ADMINST_DOWN
1269 if "admin_up" in val:
1270 del val["admin_up"]
1271 obj = super(Instance, cls).FromDict(val)
1272 obj.nics = outils.ContainerFromDicts(obj.nics, list, NIC)
1273
1274 # attribute 'disks_info' is only present when deserializing from a RPC
1275 # call in the backend
1276 disks_info = getattr(obj, "disks_info", None)
1277 if disks_info:
1278 obj.disks_info = outils.ContainerFromDicts(disks_info, list, Disk)
1279
1280 return obj
1281
1282 def UpgradeConfig(self):
1283 """Fill defaults for missing configuration values.
1284
1285 """
1286 if self.admin_state_source is None:
1287 self.admin_state_source = constants.ADMIN_SOURCE
1288 for nic in self.nics:
1289 nic.UpgradeConfig()
1290 if self.disks is None:
1291 self.disks = []
1292 if self.hvparams:
1293 for key in constants.HVC_GLOBALS:
1294 try:
1295 del self.hvparams[key]
1296 except KeyError:
1297 pass
1298 if self.osparams is None:
1299 self.osparams = {}
1300 if self.osparams_private is None:
1301 self.osparams_private = serializer.PrivateDict()
1302 UpgradeBeParams(self.beparams)
1303 if self.disks_active is None:
1304 self.disks_active = self.admin_state == constants.ADMINST_UP
1305
1306
1307 class OS(ConfigObject):
1308 """Config object representing an operating system.
1309
1310 @type supported_parameters: list
1311 @ivar supported_parameters: a list of tuples, name and description,
1312 containing the parameters supported by this OS
1313
1314 @type VARIANT_DELIM: string
1315 @cvar VARIANT_DELIM: the variant delimiter
1316
1317 """
1318 __slots__ = [
1319 "name",
1320 "path",
1321 "api_versions",
1322 "create_script",
1323 "create_script_untrusted",
1324 "export_script",
1325 "import_script",
1326 "rename_script",
1327 "verify_script",
1328 "supported_variants",
1329 "supported_parameters",
1330 ]
1331
1332 VARIANT_DELIM = "+"
1333
1334 @classmethod
1335 def SplitNameVariant(cls, name):
1336 """Splits the name into the proper name and variant.
1337
1338 @param name: the OS (unprocessed) name
1339 @rtype: list
1340 @return: a list of two elements; if the original name didn't
1341 contain a variant, it's returned as an empty string
1342
1343 """
1344 nv = name.split(cls.VARIANT_DELIM, 1)
1345 if len(nv) == 1:
1346 nv.append("")
1347 return nv
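# Illustrative sketch (editorial addition): the name is split on the first
# VARIANT_DELIM only, e.g.
#   >>> OS.SplitNameVariant("debootstrap+default")
#   ['debootstrap', 'default']
#   >>> OS.SplitNameVariant("debootstrap")
#   ['debootstrap', '']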
1348
1349 @classmethod
1350 def GetName(cls, name):
1351 """Returns the proper name of the os (without the variant).
1352
1353 @param name: the OS (unprocessed) name
1354
1355 """
1356 return cls.SplitNameVariant(name)[0]
1357
1358 @classmethod
1359 def GetVariant(cls, name):
1360 """Returns the variant the os (without the base name).
1361
1362 @param name: the OS (unprocessed) name
1363
1364 """
1365 return cls.SplitNameVariant(name)[1]
1366
1367 def IsTrusted(self):
1368 """Returns whether this OS is trusted.
1369
1370 @rtype: bool
1371 @return: L{True} if this OS is trusted, L{False} otherwise
1372
1373 """
1374 return not self.create_script_untrusted
1375
1376
1377 class ExtStorage(ConfigObject):
1378 """Config object representing an External Storage Provider.
1379
1380 """
1381 __slots__ = [
1382 "name",
1383 "path",
1384 "create_script",
1385 "remove_script",
1386 "grow_script",
1387 "attach_script",
1388 "detach_script",
1389 "setinfo_script",
1390 "verify_script",
1391 "snapshot_script",
1392 "open_script",
1393 "close_script",
1394 "supported_parameters",
1395 ]
1396
1397
1398 class NodeHvState(ConfigObject):
1399 """Hypvervisor state on a node.
1400
1401 @ivar mem_total: Total amount of memory
1402 @ivar mem_node: Memory used by, or reserved for, the node itself (not always
1403 available)
1404 @ivar mem_hv: Memory used by hypervisor or lost due to instance allocation
1405 rounding
1406 @ivar mem_inst: Memory used by instances living on node
1407 @ivar cpu_total: Total node CPU core count
1408 @ivar cpu_node: Number of CPU cores reserved for the node itself
1409
1410 """
1411 __slots__ = [
1412 "mem_total",
1413 "mem_node",
1414 "mem_hv",
1415 "mem_inst",
1416 "cpu_total",
1417 "cpu_node",
1418 ] + _TIMESTAMPS
1419
1420
1421 class NodeDiskState(ConfigObject):
1422 """Disk state on a node.
1423
1424 """
1425 __slots__ = [
1426 "total",
1427 "reserved",
1428 "overhead",
1429 ] + _TIMESTAMPS
1430
1431
1432 class Node(TaggableObject):
1433 """Config object representing a node.
1434
1435 @ivar hv_state: Hypervisor state (e.g. number of CPUs)
1436 @ivar hv_state_static: Hypervisor state overridden by user
1437 @ivar disk_state: Disk state (e.g. free space)
1438 @ivar disk_state_static: Disk state overridden by user
1439
1440 """
1441 __slots__ = [
1442 "name",
1443 "primary_ip",
1444 "secondary_ip",
1445 "serial_no",
1446 "master_candidate",
1447 "offline",
1448 "drained",
1449 "group",
1450 "master_capable",
1451 "vm_capable",
1452 "ndparams",
1453 "powered",
1454 "hv_state",
1455 "hv_state_static",
1456 "disk_state",
1457 "disk_state_static",
1458 ] + _TIMESTAMPS + _UUID
1459
1460 def UpgradeConfig(self):
1461 """Fill defaults for missing configuration values.
1462
1463 """
1464 # pylint: disable=E0203
1465 # because these are "defined" via slots, not manually
1466 if self.master_capable is None:
1467 self.master_capable = True
1468
1469 if self.vm_capable is None:
1470 self.vm_capable = True
1471
1472 if self.ndparams is None:
1473 self.ndparams = {}
1474 # And remove any global parameter
1475 for key in constants.NDC_GLOBALS:
1476 if key in self.ndparams:
1477 logging.warning("Ignoring %s node parameter for node %s",
1478 key, self.name)
1479 del self.ndparams[key]
1480
1481 if self.powered is None:
1482 self.powered = True
1483
1484 def ToDict(self, _with_private=False):
1485 """Custom function for serializing.
1486
1487 """
1488 data = super(Node, self).ToDict(_with_private=_with_private)
1489
1490 hv_state = data.get("hv_state", None)
1491 if hv_state is not None:
1492 data["hv_state"] = outils.ContainerToDicts(hv_state)
1493
1494 disk_state = data.get("disk_state", None)
1495 if disk_state is not None:
1496 data["disk_state"] = \
1497 dict((key, outils.ContainerToDicts(value))
1498 for (key, value) in disk_state.items())
1499
1500 return data
1501
1502 @classmethod
1503 def FromDict(cls, val):
1504 """Custom function for deserializing.
1505
1506 """
1507 obj = super(Node, cls).FromDict(val)
1508
1509 if obj.hv_state is not None:
1510 obj.hv_state = \
1511 outils.ContainerFromDicts(obj.hv_state, dict, NodeHvState)
1512
1513 if obj.disk_state is not None:
1514 obj.disk_state = \
1515 dict((key, outils.ContainerFromDicts(value, dict, NodeDiskState))
1516 for (key, value) in obj.disk_state.items())
1517
1518 return obj
1519
1520
1521 class NodeGroup(TaggableObject):
1522 """Config object representing a node group."""
1523 __slots__ = [
1524 "name",
1525 "members",
1526 "ndparams",
1527 "diskparams",
1528 "ipolicy",
1529 "serial_no",
1530 "hv_state_static",
1531 "disk_state_static",
1532 "alloc_policy",
1533 "networks",
1534 ] + _TIMESTAMPS + _UUID
1535
1536 def ToDict(self, _with_private=False):
1537 """Custom function for nodegroup.
1538
1539 This discards the members object, which gets recalculated and is only kept
1540 in memory.
1541
1542 """
1543 mydict = super(NodeGroup, self).ToDict(_with_private=_with_private)
1544 del mydict["members"]
1545 return mydict
1546
1547 @classmethod
1548 def FromDict(cls, val):
1549 """Custom function for nodegroup.
1550
1551 The members slot is initialized to an empty list, upon deserialization.
1552
1553 """
1554 obj = super(NodeGroup, cls).FromDict(val)
1555 obj.members = []
1556 return obj
1557
1558 def UpgradeConfig(self):
1559 """Fill defaults for missing configuration values.
1560
1561 """
1562 if self.ndparams is None:
1563 self.ndparams = {}
1564
1565 if self.serial_no is None:
1566 self.serial_no = 1
1567
1568 if self.alloc_policy is None:
1569 self.alloc_policy = constants.ALLOC_POLICY_PREFERRED
1570
1571 # We only update mtime, and not ctime, since we would not be able
1572 # to provide a correct value for creation time.
1573 if self.mtime is None:
1574 self.mtime = time.time()
1575
1576 if self.diskparams is None:
1577 self.diskparams = {}
1578 if self.ipolicy is None:
1579 self.ipolicy = MakeEmptyIPolicy()
1580
1581 if self.networks is None:
1582 self.networks = {}
1583
1584 for network, netparams in self.networks.items():
1585 self.networks[network] = FillDict(constants.NICC_DEFAULTS, netparams)
1586
1587 def FillND(self, node):
1588 """Return filled out ndparams for L{objects.Node}
1589
1590 @type node: L{objects.Node}
1591 @param node: A Node object to fill
1592 @return: a copy of the node's ndparams with defaults filled
1593
1594 """
1595 return self.SimpleFillND(node.ndparams)
1596
1597 def SimpleFillND(self, ndparams):
1598 """Fill a given ndparams dict with defaults.
1599
1600 @type ndparams: dict
1601 @param ndparams: the dict to fill
1602 @rtype: dict
1603 @return: a copy of the passed in ndparams with missing keys filled
1604 from the node group defaults
1605
1606 """
1607 return FillDict(self.ndparams, ndparams)
1608
1609
1610 class Cluster(TaggableObject):
1611 """Config object representing the cluster."""
1612 __slots__ = [
1613 "serial_no",
1614 "rsahostkeypub",
1615 "dsahostkeypub",
1616 "highest_used_port",
1617 "tcpudp_port_pool",
1618 "mac_prefix",
1619 "volume_group_name",
1620 "reserved_lvs",
1621 "drbd_usermode_helper",
1622 "default_bridge",
1623 "default_hypervisor",
1624 "master_node",
1625 "master_ip",
1626 "master_netdev",
1627 "master_netmask",
1628 "use_external_mip_script",
1629 "cluster_name",
1630 "file_storage_dir",
1631 "shared_file_storage_dir",
1632 "gluster_storage_dir",
1633 "enabled_hypervisors",
1634 "hvparams",
1635 "ipolicy",
1636 "os_hvp",
1637 "beparams",
1638 "osparams",
1639 "osparams_private_cluster",
1640 "nicparams",
1641 "ndparams",
1642 "diskparams",
1643 "candidate_pool_size",
1644 "modify_etc_hosts",
1645 "modify_ssh_setup",
1646 "maintain_node_health",
1647 "uid_pool",
1648 "default_iallocator",
1649 "default_iallocator_params",
1650 "hidden_os",
1651 "blacklisted_os",
1652 "primary_ip_family",
1653 "prealloc_wipe_disks",
1654 "hv_state_static",
1655 "disk_state_static",
1656 "enabled_disk_templates",
1657 "candidate_certs",
1658 "max_running_jobs",
1659 "max_tracked_jobs",
1660 "install_image",
1661 "instance_communication_network",
1662 "zeroing_image",
1663 "compression_tools",
1664 "enabled_user_shutdown",
1665 "data_collectors",
1666 ] + _TIMESTAMPS + _UUID
1667
1668 def UpgradeConfig(self):
1669 """Fill defaults for missing configuration values.
1670
1671 """
1672 # pylint: disable=E0203
1673 # because these are "defined" via slots, not manually
1674 if self.hvparams is None:
1675 self.hvparams = constants.HVC_DEFAULTS
1676 else:
1677 for hypervisor in constants.HYPER_TYPES:
1678 try:
1679 existing_params = self.hvparams[hypervisor]
1680 except KeyError:
1681 existing_params = {}
1682 self.hvparams[hypervisor] = FillDict(
1683 constants.HVC_DEFAULTS[hypervisor], existing_params)
1684
1685 if self.os_hvp is None:
1686 self.os_hvp = {}
1687
1688 if self.osparams is None:
1689 self.osparams = {}
1690 # osparams_private_cluster added in 2.12
1691 if self.osparams_private_cluster is None:
1692 self.osparams_private_cluster = {}
1693
1694 self.ndparams = UpgradeNDParams(self.ndparams)
1695
1696 self.beparams = UpgradeGroupedParams(self.beparams,
1697 constants.BEC_DEFAULTS)
1698 for beparams_group in self.beparams:
1699 UpgradeBeParams(self.beparams[beparams_group])
1700
1701 migrate_default_bridge = not self.nicparams
1702 self.nicparams = UpgradeGroupedParams(self.nicparams,
1703 constants.NICC_DEFAULTS)
1704 if migrate_default_bridge:
1705 self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
1706 self.default_bridge
1707
1708 if self.modify_etc_hosts is None:
1709 self.modify_etc_hosts = True
1710
1711 if self.modify_ssh_setup is None:
1712 self.modify_ssh_setup = True
1713
1714 # default_bridge is no longer used in 2.1. The slot is left there to
1715 # support auto-upgrading. It can be removed once we decide to deprecate
1716 # upgrading straight from 2.0.
1717 if self.default_bridge is not None:
1718 self.default_bridge = None
1719
1720 # default_hypervisor is just the first enabled one in 2.1. This slot and
1721 # code can be removed once upgrading straight from 2.0 is deprecated.
1722 if self.default_hypervisor is not None:
1723 self.enabled_hypervisors = ([self.default_hypervisor] +
1724 [hvname for hvname in self.enabled_hypervisors
1725 if hvname != self.default_hypervisor])
1726 self.default_hypervisor = None
1727
1728 # maintain_node_health added after 2.1.1
1729 if self.maintain_node_health is None:
1730 self.maintain_node_health = False
1731
1732 if self.uid_pool is None:
1733 self.uid_pool = []
1734
1735 if self.default_iallocator is None:
1736 self.default_iallocator = ""
1737
1738 if self.default_iallocator_params is None:
1739 self.default_iallocator_params = {}
1740
1741 # reserved_lvs added before 2.2
1742 if self.reserved_lvs is None:
1743 self.reserved_lvs = []
1744
1745 # hidden and blacklisted operating systems added before 2.2.1
1746 if self.hidden_os is None:
1747 self.hidden_os = []
1748
1749 if self.blacklisted_os is None:
1750 self.blacklisted_os = []
1751
1752 # primary_ip_family added before 2.3
1753 if self.primary_ip_family is None:
1754 self.primary_ip_family = AF_INET
1755
1756 if self.master_netmask is None:
1757 ipcls = netutils.IPAddress.GetClassFromIpFamily(self.primary_ip_family)
1758 self.master_netmask = ipcls.iplen
1759
1760 if self.prealloc_wipe_disks is None:
1761 self.prealloc_wipe_disks = False
1762
1763 # shared_file_storage_dir added before 2.5
1764 if self.shared_file_storage_dir is None:
1765 self.shared_file_storage_dir = ""
1766
1767 # gluster_storage_dir added in 2.11
1768 if self.gluster_storage_dir is None:
1769 self.gluster_storage_dir = ""
1770
1771 if self.use_external_mip_script is None:
1772 self.use_external_mip_script = False
1773
1774 if self.diskparams:
1775 self.diskparams = UpgradeDiskParams(self.diskparams)
1776 else:
1777 self.diskparams = constants.DISK_DT_DEFAULTS.copy()
1778
1779 # instance policy added before 2.6
1780 if self.ipolicy is None:
1781 self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, {})
1782 else:
1783 # we can either make sure to upgrade the ipolicy always, or only
1784 # do it in some corner cases (e.g. missing keys); note that this
1785 # will break any removal of keys from the ipolicy dict
1786 wrongkeys = frozenset(self.ipolicy.keys()) - constants.IPOLICY_ALL_KEYS
1787 if wrongkeys:
1788 # These keys would be silently removed by FillIPolicy()
1789 msg = ("Cluster instance policy contains spurious keys: %s" %
1790 utils.CommaJoin(wrongkeys))
1791 raise errors.ConfigurationError(msg)
1792 self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, self.ipolicy)
1793
1794 # hv_state_static added in 2.7
1795 if self.hv_state_static is None:
1796 self.hv_state_static = {}
1797 if self.disk_state_static is None:
1798 self.disk_state_static = {}
1799
1800 if self.candidate_certs is None:
1801 self.candidate_certs = {}
1802
1803 if self.max_running_jobs is None:
1804 self.max_running_jobs = constants.LUXID_MAXIMAL_RUNNING_JOBS_DEFAULT
1805
1806 if self.max_tracked_jobs is None:
1807 self.max_tracked_jobs = constants.LUXID_MAXIMAL_TRACKED_JOBS_DEFAULT
1808
1809 if self.instance_communication_network is None:
1810 self.instance_communication_network = ""
1811
1812 if self.install_image is None:
1813 self.install_image = ""
1814
1815 if self.compression_tools is None:
1816 self.compression_tools = constants.IEC_DEFAULT_TOOLS
1817
1818 if self.enabled_user_shutdown is None:
1819 self.enabled_user_shutdown = False
1820
1821 @property
1822 def primary_hypervisor(self):
1823 """The first hypervisor is the primary.
1824
1825 Useful, for example, for L{Node}'s hv/disk state.
1826
1827 """
1828 return self.enabled_hypervisors[0]
1829
1830 def ToDict(self, _with_private=False):
1831 """Custom function for cluster.
1832
1833 """
1834 mydict = super(Cluster, self).ToDict(_with_private=_with_private)
1835
1836 # Explicitly save private parameters.
1837 if _with_private:
1838 for os in mydict["osparams_private_cluster"]:
1839 mydict["osparams_private_cluster"][os] = \
1840 self.osparams_private_cluster[os].Unprivate()
1841
1842 if self.tcpudp_port_pool is None:
1843 tcpudp_port_pool = []
1844 else:
1845 tcpudp_port_pool = list(self.tcpudp_port_pool)
1846
1847 mydict["tcpudp_port_pool"] = tcpudp_port_pool
1848
1849 return mydict
1850
1851 @classmethod
1852 def FromDict(cls, val):
1853 """Custom function for cluster.
1854
1855 """
1856 obj = super(Cluster, cls).FromDict(val)
1857
1858 if obj.tcpudp_port_pool is None:
1859 obj.tcpudp_port_pool = set()
1860 elif not isinstance(obj.tcpudp_port_pool, set):
1861 obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)
1862
1863 return obj
1864
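# Illustrative sketch, not part of the original module: the custom
# ToDict/FromDict pair above converts the TCP/UDP port pool between a set
# (in memory) and a list (in the serialized dict).  Assuming a Cluster built
# directly with only that slot populated:
#
#   cluster = Cluster(tcpudp_port_pool=set([11000, 11001]))
#   data = cluster.ToDict()
#   sorted(data["tcpudp_port_pool"])         # -> [11000, 11001] (a list)
#   Cluster.FromDict(data).tcpudp_port_pool  # -> set([11000, 11001])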
1865 def SimpleFillDP(self, diskparams):
1866 """Fill a given diskparams dict with cluster defaults.
1867
1868 @param diskparams: The diskparams
1869 @return: The defaults dict
1870
1871 """
1872 return FillDiskParams(self.diskparams, diskparams)
1873
1874 def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
1875 """Get the default hypervisor parameters for the cluster.
1876
1877 @param hypervisor: the hypervisor name
1878 @param os_name: if specified, we'll also update the defaults for this OS
1879 @param skip_keys: if passed, list of keys not to use
1880 @return: the defaults dict
1881
1882 """
1883 if skip_keys is None:
1884 skip_keys = []
1885
1886 fill_stack = [self.hvparams.get(hypervisor, {})]
1887 if os_name is not None:
1888 os_hvp = self.os_hvp.get(os_name, {}).get(hypervisor, {})
1889 fill_stack.append(os_hvp)
1890
1891 ret_dict = {}
1892 for o_dict in fill_stack:
1893 ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys)
1894
1895 return ret_dict
1896
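# Illustrative sketch (hypervisor name and parameter values are hypothetical):
# OS-specific hypervisor parameters from os_hvp are layered on top of the
# cluster-wide hvparams for that hypervisor:
#
#   cluster = Cluster(
#       hvparams={"xen-pvm": {"kernel_path": "/boot/vmlinuz",
#                             "root_path": "/dev/xvda1"}},
#       os_hvp={"debootstrap": {"xen-pvm": {"root_path": "/dev/xvda2"}}})
#   cluster.GetHVDefaults("xen-pvm", os_name="debootstrap")
#   # -> {"kernel_path": "/boot/vmlinuz", "root_path": "/dev/xvda2"}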
1897 def SimpleFillHV(self, hv_name, os_name, hvparams, skip_globals=False):
1898 """Fill a given hvparams dict with cluster defaults.
1899
1900 @type hv_name: string
1901 @param hv_name: the hypervisor to use
1902 @type os_name: string
1903 @param os_name: the OS to use for overriding the hypervisor defaults
@type hvparams: dict
@param hvparams: the hvparams dict to fill with cluster defaults
1904 @type skip_globals: boolean
1905 @param skip_globals: if True, the global hypervisor parameters will
1906 not be filled
1907 @rtype: dict
1908 @return: a copy of the given hvparams with missing keys filled from
1909 the cluster defaults
1910
1911 """
1912 if skip_globals:
1913 skip_keys = constants.HVC_GLOBALS
1914 else:
1915 skip_keys = []
1916
1917 def_dict = self.GetHVDefaults(hv_name, os_name, skip_keys=skip_keys)
1918 return FillDict(def_dict, hvparams, skip_keys=skip_keys)
1919
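# Illustrative sketch, continuing the hypothetical cluster from the previous
# sketch: keys given explicitly in hvparams win over the defaults, and with
# skip_globals=True any keys listed in constants.HVC_GLOBALS are dropped from
# the result:
#
#   cluster.SimpleFillHV("xen-pvm", "debootstrap",
#                        {"kernel_path": "/boot/vmlinuz-custom"})
#   # -> {"kernel_path": "/boot/vmlinuz-custom", "root_path": "/dev/xvda2"}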
1920 def FillHV(self, instance, skip_globals=False):
1921 """Fill an instance's hvparams dict with cluster defaults.
1922
1923 @type instance: L{objects.Instance}
1924 @param instance: the instance parameter to fill
1925 @type skip_globals: boolean
1926 @param skip_globals: if True, the global hypervisor parameters will
1927 not be filled
1928 @rtype: dict
1929 @return: a copy of the instance's hvparams with missing keys filled from
1930 the cluster defaults
1931
1932 """
1933 return self.SimpleFillHV(instance.hypervisor, instance.os,
1934 instance.hvparams, skip_globals)
1935
1936 def SimpleFillBE(self, beparams):
1937 """Fill a given beparams dict with cluster defaults.
1938
1939 @type beparams: dict
1940 @param beparams: the dict to fill
1941 @rtype: dict
1942 @return: a copy of the passed in beparams with missing keys filled
1943 from the cluster defaults
1944
1945 """
1946 return FillDict(self.beparams.get(constants.PP_DEFAULT, {}), beparams)
1947
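# Illustrative sketch (hypothetical values): only the "default" backend
# parameter profile (constants.PP_DEFAULT) is consulted, and caller-supplied
# values take precedence:
#
#   cluster = Cluster(beparams={constants.PP_DEFAULT:
#                               {"vcpus": 1, "auto_balance": True}})
#   cluster.SimpleFillBE({"vcpus": 4})
#   # -> {"vcpus": 4, "auto_balance": True}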
1948 def FillBE(self, instance):
1949 """Fill an instance's beparams dict with cluster defaults.
1950
1951 @type instance: L{objects.Instance}
1952 @param instance: the instance parameter to fill
1953 @rtype: dict
1954 @return: a copy of the instance's beparams with missing keys filled from
1955 the cluster defaults
1956
1957 """
1958 return self.SimpleFillBE(instance.beparams)
1959
1960 def SimpleFillNIC(self, nicparams):
1961 """Fill a given nicparams dict with cluster defaults.
1962
1963 @type nicparams: dict
1964 @param nicparams: the dict to fill
1965 @rtype: dict
1966 @return: a copy of the passed in nicparams with missing keys filled
1967 from the cluster defaults
1968
1969 """
1970 return FillDict(self.nicparams.get(constants.PP_DEFAULT, {}), nicparams)
1971
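# Illustrative sketch (hypothetical values), mirroring SimpleFillBE but using
# the "default" NIC parameter profile:
#
#   cluster = Cluster(nicparams={constants.PP_DEFAULT:
#                                {"mode": "bridged", "link": "xen-br0"}})
#   cluster.SimpleFillNIC({"link": "eth0"})
#   # -> {"mode": "bridged", "link": "eth0"}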
1972 def SimpleFillOS(self, os_name,
1973 os_params_public,
1974 os_params_private=None,
1975 os_params_secret=None):
1976 """Fill an instance's osparams dict with cluster defaults.
1977
1978 @type os_name: string
1979 @param os_name: the OS name to use
1980 @type os_params_public: dict
1981 @param os_params_public: the dict to fill with default values
1982 @type os_params_private: dict
1983 @param os_params_private: the dict with private fields to fill
1984 with default values. Not passing this field
1985 results in no private fields being added to the
1986 return value. Private fields will be wrapped in
1987 L{Private} objects.
1988 @type os_params_secret: dict
1989 @param os_params_secret: the dict with secret fields to fill
1990 with default values. Not passing this field
1991 results in no secret fields being added to the
1992 return value. Secret fields will be wrapped in
1993 L{Private} objects.
1994 @rtype: dict
1995 @return: a copy of the instance's osparams with missing keys filled from
1996 the cluster defaults. Private and secret parameters are not included
1997 unless the respective optional parameters are supplied.
1998
1999 """
2000 if os_name is None:
2001 name_only = None
2002 else:
2003 name_only = OS.GetName(os_name)
2004
2005 defaults_base_public = self.osparams.get(name_only, {})
2006 defaults_public = FillDict(defaults_base_public,
2007 self.osparams.get(os_name, {}))
2008 params_public = FillDict(defaults_public, os_params_public)
2009
2010 if os_params_private is not None:
2011 defaults_base_private = self.osparams_private_cluster.get(name_only, {})
2012 defaults_private = FillDict(defaults_base_private,
2013 self.osparams_private_cluster.get(os_name,
2014 {}))
2015 params_private = FillDict(defaults_private, os_params_private)
2016 else:
2017 params_private = {}
2018
2019 if os_params_secret is not None:
2020 # There can't be default secret settings, so there's nothing to be done.
2021 params_secret = os_params_secret
2022 else:
2023 params_secret = {}
2024
2025 # Enforce that the key sets are disjoint:
2026 duplicate_keys = utils.GetRepeatedKeys(params_public,
2027 params_private,
2028 params_secret)
2029 if not duplicate_keys:
2030
2031 # Actually update them:
2032 params_public.update(params_private)
2033 params_public.update(params_secret)
2034
2035 return params_public
2036
2037 else:
2038
2039 def formatter(keys):
2040 return utils.CommaJoin(sorted(map(repr, keys))) if keys else "(none)"
2041
2042 # Lose the values.
2043 params_public = set(params_public)
2044 params_private = set(params_private)
2045 params_secret = set(params_secret)
2046
2047 msg = """Cannot assign multiple values to OS parameters: {dupes}.
2048
2049 Conflicting OS parameters that would have been set by this operation:
2050 - at public visibility: {public}
2051 - at private visibility: {private}
2052 - at secret visibility: {secret}
2053 """.format(dupes=formatter(duplicate_keys),
2054 public=formatter(params_public & duplicate_keys),
2055 private=formatter(params_private & duplicate_keys),
2056 secret=formatter(params_secret & duplicate_keys))
2057 raise errors.OpPrereqError(msg)
2058
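# Illustrative sketch (OS and parameter names are hypothetical): public and
# private parameters are merged into one dict as long as their key sets are
# disjoint; a key appearing at more than one visibility raises OpPrereqError:
#
#   cluster = Cluster(osparams={"debootstrap": {"dhcp": "yes"}},
#                     osparams_private_cluster={})
#   cluster.SimpleFillOS("debootstrap+default",
#                        {"arch": "amd64"},
#                        os_params_private={"rootpw": "s3cr3t"})
#   # -> {"dhcp": "yes", "arch": "amd64", "rootpw": "s3cr3t"}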
2059 @staticmethod
2060 def SimpleFillHvState(hv_state):
2061 """Fill an hv_state sub dict with cluster defaults.
2062
2063 """
2064 return FillDict(constants.HVST_DEFAULTS, hv_state)
2065
2066 @staticmethod
2067 def SimpleFillDiskState(disk_state):
2068 """Fill an disk_state sub dict with cluster defaults.
2069
2070 """
2071 return FillDict(constants.DS_DEFAULTS, disk_state)
2072
2073 def FillND(self, node, nodegroup):
2074 """Return filled out ndparams for L{objects.NodeGroup} and L{objects.Node}
2075
2076 @type node: L{objects.Node}
2077 @param node: A Node object to fill
2078 @type nodegroup: L{objects.NodeGroup}
2079 @param nodegroup: the node group the node belongs to
2080 @return: a copy of the node's ndparams with defaults filled
2081
2082 """
2083 return self.SimpleFillND(nodegroup.FillND(node))
2084
2085 def FillNDGroup(self, nodegroup):
2086 """Return filled out ndparams for just L{objects.NodeGroup}
2087
2088 @type nodegroup: L{objects.NodeGroup}
2089 @param nodegroup: the node group to fill
2090 @return: a copy of the node group's ndparams with defaults filled
2091
2092 """
2093 return self.SimpleFillND(nodegroup.SimpleFillND({}))
2094
2095 def SimpleFillND(self, ndparams):
2096 """Fill a given ndparams dict with defaults.
2097
2098 @type ndparams: dict
2099 @param ndparams: the dict to fill
2100 @rtype: dict
2101 @return: a copy of the passed in ndparams with missing keys filled
2102 from the cluster defaults
2103
2104 """
2105 return FillDict(self.ndparams, ndparams)
2106
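# Illustrative sketch (hypothetical values): node parameters are layered
# node -> node group -> cluster, with the most specific level winning:
#
#   cluster = Cluster(ndparams={"oob_program": "", "spindle_count": 1})
#   group = NodeGroup(ndparams={"spindle_count": 4})
#   node = Node(ndparams={})
#   cluster.FillND(node, group)
#   # -> {"oob_program": "", "spindle_count": 4}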
2107 def SimpleFillIPolicy(self, ipolicy):
2108 """ Fill instance policy dict with defaults.
2109
2110 @type ipolicy: dict
2111 @param ipolicy: the dict to fill
2112 @rtype: dict
2113 @return: a copy of passed ipolicy with missing keys filled from
2114 the cluster defaults
2115
2116 """
2117 return FillIPolicy(self.ipolicy, ipolicy)
2118
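# Illustrative sketch, assuming "cluster" is a Cluster whose UpgradeConfig has
# already run: a partial instance policy is completed from the cluster-level
# policy (itself filled from constants.IPOLICY_DEFAULTS):
#
#   partial = {constants.IPOLICY_VCPU_RATIO: 2.0}
#   full = cluster.SimpleFillIPolicy(partial)
#   # full now carries every key in constants.IPOLICY_ALL_KEYS, with the
#   # vcpu-ratio override preserved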
2119 def IsDiskTemplateEnabled(self, disk_template):
2120 """Checks if a particular disk template is enabled.
2121
2122 """
2123 return utils.storage.IsDiskTemplateEnabled(
2124 disk_template, self.enabled_disk_templates)
2125
2126 def IsFileStorageEnabled(self):
2127 """Checks if file storage is enabled.
2128
2129 """
2130 return utils.storage.IsFileStorageEnabled(self.enabled_disk_templates)
2131
2132 def IsSharedFileStorageEnabled(self):
2133 """Checks if shared file storage is enabled.
2134
2135 """
2136 return utils.storage.IsSharedFileStorageEnabled(
2137 self.enabled_disk_templates)
2138
2139
2140 class BlockDevStatus(ConfigObject):
2141 """Config object representing the status of a block device."""
2142 __slots__ = [
2143 "dev_path",
2144 "major",
2145 "minor",
2146 "sync_percent",
2147 "estimated_time",
2148 "is_degraded",
2149 "ldisk_status",
2150 ]
2151
2152
2153 class ImportExportStatus(ConfigObject):
2154 """Config object representing the status of an import or export."""
2155 __slots__ = [
2156 "recent_output",
2157 "listen_port",
2158 "connected",
2159 "progress_mbytes",
2160 "progress_throughput",
2161 "progress_eta",
2162 "progress_percent",
2163 "exit_status",
2164 "error_message",
2165 ] + _TIMESTAMPS
2166
2167
2168 class ImportExportOptions(ConfigObject):
2169 """Options for import/export daemon
2170
2171 @ivar key_name: X509 key name (None for cluster certificate)
2172 @ivar ca_pem: Remote peer CA in PEM format (None for cluster certificate)
2173 @ivar compress: Compression tool to use
2174 @ivar magic: Used to ensure the connection goes to the right disk
2175 @ivar ipv6: Whether to use IPv6
2176 @ivar connect_timeout: Number of seconds for establishing connection
2177
2178 """
2179 __slots__ = [
2180 "key_name",
2181 "ca_pem",
2182 "compress",
2183 "magic",
2184 "ipv6",
2185 "connect_timeout",
2186 ]
2187
2188
2189 class ConfdRequest(ConfigObject):
2190 """Object holding a confd request.
2191
2192 @ivar protocol: confd protocol version
2193 @ivar type: confd query type
2194 @ivar query: query request
2195 @ivar rsalt: requested reply salt
2196
2197 """
2198 __slots__ = [
2199 "protocol",
2200 "type",
2201 "query",
2202 "rsalt",
2203 ]
2204
2205
2206 class ConfdReply(ConfigObject):
2207 """Object holding a confd reply.
2208
2209 @ivar protocol: confd protocol version
2210 @ivar status: reply status code (ok, error)
2211 @ivar answer: confd query reply
2212 @ivar serial: configuration serial number
2213
2214 """
2215 __slots__ = [
2216 "protocol",
2217 "status",
2218 "answer",
2219 "serial",
2220 ]
2221
2222
2223 class QueryFieldDefinition(ConfigObject):
2224 """Object holding a query field definition.
2225
2226 @ivar name: Field name
2227 @ivar title: Human-readable title
2228 @ivar kind: Field type
2229 @ivar doc: Human-readable description
2230
2231 """
2232 __slots__ = [
2233 "name",
2234 "title",
2235 "kind",
2236 "doc",
2237 ]
2238
2239
2240 class _QueryResponseBase(ConfigObject):
2241 __slots__ = [
2242 "fields",
2243 ]
2244
2245 def ToDict(self, _with_private=False):
2246 """Custom function for serializing.
2247
2248 """
2249 mydict = super(_QueryResponseBase, self).ToDict()
2250 mydict["fields"] = outils.ContainerToDicts(mydict["fields"])
2251 return mydict
2252
2253 @classmethod
2254 def FromDict(cls, val):
2255 """Custom function for de-serializing.
2256
2257 """
2258 obj = super(_QueryResponseBase, cls).FromDict(val)
2259 obj.fields = \
2260 outils.ContainerFromDicts(obj.fields, list, QueryFieldDefinition)
2261 return obj
2262
2263
2264 class QueryResponse(_QueryResponseBase):
2265 """Object holding the response to a query.
2266
2267 @ivar fields: List of L{QueryFieldDefinition} objects
2268 @ivar data: Requested data
2269
2270 """
2271 __slots__ = [
2272 "data",
2273 ]
2274
2275
2276 class QueryFieldsRequest(ConfigObject):
2277 """Object holding a request for querying available fields.
2278
2279 """
2280 __slots__ = [
2281 "what",
2282 "fields",
2283 ]
2284
2285
2286 class QueryFieldsResponse(_QueryResponseBase):
2287 """Object holding the response to a query for fields.
2288
2289 @ivar fields: List of L{QueryFieldDefinition} objects
2290
2291 """
2292 __slots__ = []
2293
2294
2295 class MigrationStatus(ConfigObject):
2296 """Object holding the status of a migration.
2297
2298 """
2299 __slots__ = [
2300 "status",
2301 "transferred_ram",
2302 "total_ram",
2303 ]
2304
2305
2306 class InstanceConsole(ConfigObject):
2307 """Object describing how to access the console of an instance.
2308
2309 """
2310 __slots__ = [
2311 "instance",
2312 "kind",
2313 "message",
2314 "host",
2315 "port",
2316 "user",
2317 "command",
2318 "display",
2319 ]
2320
2321 def Validate(self):
2322 """Validates contents of this object.
2323
2324 """
2325 assert self.kind in constants.CONS_ALL, "Unknown console type"
2326 assert self.instance, "Missing instance name"
2327 assert self.message or self.kind in [constants.CONS_SSH,
2328 constants.CONS_SPICE,
2329 constants.CONS_VNC]
2330 assert self.host or self.kind == constants.CONS_MESSAGE
2331 assert self.port or self.kind in [constants.CONS_MESSAGE,
2332 constants.CONS_SSH]
2333 assert self.user or self.kind in [constants.CONS_MESSAGE,
2334 constants.CONS_SPICE,
2335 constants.CONS_VNC]
2336 assert self.command or self.kind in [constants.CONS_MESSAGE,
2337 constants.CONS_SPICE,
2338 constants.CONS_VNC]
2339 assert self.display or self.kind in [constants.CONS_MESSAGE,
2340 constants.CONS_SPICE,
2341 constants.CONS_SSH]
2342
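# Illustrative sketch (hypothetical values): an SSH console must carry the
# instance name, host, user and command, while message, port and display may
# stay unset:
#
#   console = InstanceConsole(instance="inst1.example.com",
#                             kind=constants.CONS_SSH,
#                             host="node1.example.com",
#                             user="root",
#                             command=["xl", "console", "inst1.example.com"])
#   console.Validate()   # passes; a missing host would trigger an assertion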
2343
2344 class Network(TaggableObject):
2345 """Object representing a network definition for ganeti.
2346
2347 """
2348 __slots__ = [
2349 "name",
2350 "serial_no",
2351 "mac_prefix",
2352 "network",
2353 "network6",
2354 "gateway",
2355 "gateway6",
2356 "reservations",
2357 "ext_reservations",
2358 ] + _TIMESTAMPS + _UUID
2359
2360 def HooksDict(self, prefix=""):
2361 """Export a dictionary used by hooks with a network's information.
2362
2363 @type prefix: String
2364 @param prefix: Prefix to prepend to the dict entries
2365
2366 """
2367 result = {
2368 "%sNETWORK_NAME" % prefix: self.name,
2369 "%sNETWORK_UUID" % prefix: self.uuid,
2370 "%sNETWORK_TAGS" % prefix: " ".join(self.GetTags()),
2371 }
2372 if self.network:
2373 result["%sNETWORK_SUBNET" % prefix] = self.network
2374 if self.gateway:
2375 result["%sNETWORK_GATEWAY" % prefix] = self.gateway
2376 if self.network6:
2377 result["%sNETWORK_SUBNET6" % prefix] = self.network6
2378 if self.gateway6:
2379 result["%sNETWORK_GATEWAY6" % prefix] = self.gateway6
2380 if self.mac_prefix:
2381 result["%sNETWORK_MAC_PREFIX" % prefix] = self.mac_prefix
2382
2383 return result
2384
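# Illustrative sketch (hypothetical values): only attributes that are set end
# up in the hooks environment, each key carrying the optional prefix:
#
#   net = Network(name="net100", uuid="some-uuid",
#                 network="192.0.2.0/24", gateway="192.0.2.1")
#   net.HooksDict(prefix="NEW_")
#   # -> {"NEW_NETWORK_NAME": "net100", "NEW_NETWORK_UUID": "some-uuid",
#   #     "NEW_NETWORK_TAGS": "", "NEW_NETWORK_SUBNET": "192.0.2.0/24",
#   #     "NEW_NETWORK_GATEWAY": "192.0.2.1"}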
2385 @classmethod
2386 def FromDict(cls, val):
2387 """Custom function for networks.
2388
2389 Remove deprecated network_type and family.
2390
2391 """
2392 if "network_type" in val:
2393 del val["network_type"]
2394 if "family" in val:
2395 del val["family"]
2396 obj = super(Network, cls).FromDict(val)
2397 return obj
2398
2399
2400 # need to inherit object in order to use super()
2401 class SerializableConfigParser(ConfigParser.SafeConfigParser, object):
2402 """Simple wrapper over ConfigParse that allows serialization.
2403
2404 This class is basically ConfigParser.SafeConfigParser with two
2405 additional methods that allow it to serialize/unserialize to/from a
2406 buffer.
2407
2408 """
2409 def Dumps(self):
2410 """Dump this instance and return the string representation."""
2411 buf = StringIO()
2412 self.write(buf)
2413 return buf.getvalue()
2414
2415 @classmethod
2416 def Loads(cls, data):
2417 """Load data from a string."""
2418 buf = StringIO(data)
2419 cfp = cls()
2420 cfp.readfp(buf)
2421 return cfp
2422
2423 def get(self, section, option, **kwargs):
2424 value = None
2425 try:
2426 value = super(SerializableConfigParser, self).get(section, option,
2427 **kwargs)
2428 if value.lower() == constants.VALUE_NONE:
2429 value = None
2430 except ConfigParser.NoOptionError:
2431 r = re.compile(r"(disk|nic)\d+_name|nic\d+_(network|vlan)")
2432 match = r.match(option)
2433 if match:
2434 pass
2435 else:
2436 raise
2437
2438 return value
2439
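# Illustrative sketch: Dumps/Loads round-trip an INI-style configuration
# through a plain string, and the get() override maps the special "none"
# value (and missing per-NIC/per-disk name options) to None:
#
#   cfp = SerializableConfigParser()
#   cfp.add_section("instance")
#   cfp.set("instance", "nic0_ip", constants.VALUE_NONE)
#   loaded = SerializableConfigParser.Loads(cfp.Dumps())
#   loaded.get("instance", "nic0_ip")     # -> None ("none" mapped to None)
#   loaded.get("instance", "nic3_name")   # -> None (missing, but matches the
#                                         #    disk/nic name pattern)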
2440
2441 class LvmPvInfo(ConfigObject):
2442 """Information about an LVM physical volume (PV).
2443
2444 @type name: string
2445 @ivar name: name of the PV
2446 @type vg_name: string
2447 @ivar vg_name: name of the volume group containing the PV
2448 @type size: float
2449 @ivar size: size of the PV in MiB
2450 @type free: float
2451 @ivar free: free space in the PV, in MiB
2452 @type attributes: string
2453 @ivar attributes: PV attributes
2454 @type lv_list: list of strings
2455 @ivar lv_list: names of the LVs hosted on the PV
2456 """
2457 __slots__ = [
2458 "name",
2459 "vg_name",
2460 "size",
2461 "free",
2462 "attributes",
2463 "lv_list"
2464 ]
2465
2466 def IsEmpty(self):
2467 """Is this PV empty?
2468
2469 """
2470 return self.size <= (self.free + 1)
2471
2472 def IsAllocatable(self):
2473 """Is this PV allocatable?
2474
2475 """
2476 return ("a" in self.attributes)
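# Illustrative sketch (hypothetical values): a PV counts as empty when at most
# about one MiB is in use, and as allocatable when its attribute string
# contains "a":
#
#   pv = LvmPvInfo(name="/dev/sda3", vg_name="xenvg", size=10240.0,
#                  free=10239.5, attributes="a--", lv_list=[])
#   pv.IsEmpty()        # -> True  (10240.0 <= 10239.5 + 1)
#   pv.IsAllocatable()  # -> True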