cmdlib: Cleanup public/private functions
lib/cmdlib/group.py
1 #
2 #
3
4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
5 #
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
10 #
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
15 #
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 # 02110-1301, USA.
20
21
22 """Logical units dealing with node groups."""
23
24 import logging
25
26 from ganeti import constants
27 from ganeti import errors
28 from ganeti import locking
29 from ganeti import objects
30 from ganeti import qlang
31 from ganeti import query
32 from ganeti import utils
33 from ganeti.masterd import iallocator
34 from ganeti.cmdlib.base import LogicalUnit, NoHooksLU, QueryBase, \
35 ResultWithJobs
36 from ganeti.cmdlib.common import MergeAndVerifyHvState, \
37 MergeAndVerifyDiskState, GetWantedNodes, GetUpdatedParams, \
38 CheckNodeGroupInstances, GetUpdatedIPolicy, \
39 ComputeNewInstanceViolations, GetDefaultIAllocator, ShareAll, \
40 CheckInstancesNodeGroups, LoadNodeEvacResult, MapInstanceDisksToNodes
41
42 import ganeti.masterd.instance
43
44
45 class LUGroupAdd(LogicalUnit):
46 """Logical unit for creating node groups.
47
48 """
49 HPATH = "group-add"
50 HTYPE = constants.HTYPE_GROUP
51 REQ_BGL = False
52
53 def ExpandNames(self):
54 # We need the new group's UUID here so that we can create and acquire the
55 # corresponding lock. Later, in Exec(), we'll indicate to cfg.AddNodeGroup
56 # that it should not check whether the UUID exists in the configuration.
57 self.group_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
58 self.needed_locks = {}
59 self.add_locks[locking.LEVEL_NODEGROUP] = self.group_uuid
60
61 def CheckPrereq(self):
62 """Check prerequisites.
63
64 This checks that the given group name is not already in use as a node
65 group.
66
67 """
68 try:
69 existing_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
70 except errors.OpPrereqError:
71 pass
72 else:
73 raise errors.OpPrereqError("Desired group name '%s' already exists as a"
74 " node group (UUID: %s)" %
75 (self.op.group_name, existing_uuid),
76 errors.ECODE_EXISTS)
77
78 if self.op.ndparams:
79 utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
80
81 if self.op.hv_state:
82 self.new_hv_state = MergeAndVerifyHvState(self.op.hv_state, None)
83 else:
84 self.new_hv_state = None
85
86 if self.op.disk_state:
87 self.new_disk_state = MergeAndVerifyDiskState(self.op.disk_state, None)
88 else:
89 self.new_disk_state = None
90
91 if self.op.diskparams:
92 for templ in constants.DISK_TEMPLATES:
93 if templ in self.op.diskparams:
94 utils.ForceDictType(self.op.diskparams[templ],
95 constants.DISK_DT_TYPES)
96 self.new_diskparams = self.op.diskparams
97 try:
98 utils.VerifyDictOptions(self.new_diskparams, constants.DISK_DT_DEFAULTS)
99 except errors.OpPrereqError, err:
100 raise errors.OpPrereqError("While verify diskparams options: %s" % err,
101 errors.ECODE_INVAL)
102 else:
103 self.new_diskparams = {}
104
105 if self.op.ipolicy:
106 cluster = self.cfg.GetClusterInfo()
107 full_ipolicy = cluster.SimpleFillIPolicy(self.op.ipolicy)
108 try:
109 objects.InstancePolicy.CheckParameterSyntax(full_ipolicy, False)
110 except errors.ConfigurationError, err:
111 raise errors.OpPrereqError("Invalid instance policy: %s" % err,
112 errors.ECODE_INVAL)
113
114 def BuildHooksEnv(self):
115 """Build hooks env.
116
117 """
118 return {
119 "GROUP_NAME": self.op.group_name,
120 }
121
122 def BuildHooksNodes(self):
123 """Build hooks nodes.
124
125 """
126 mn = self.cfg.GetMasterNode()
127 return ([mn], [mn])
128
129 def Exec(self, feedback_fn):
130 """Add the node group to the cluster.
131
132 """
133 group_obj = objects.NodeGroup(name=self.op.group_name, members=[],
134 uuid=self.group_uuid,
135 alloc_policy=self.op.alloc_policy,
136 ndparams=self.op.ndparams,
137 diskparams=self.new_diskparams,
138 ipolicy=self.op.ipolicy,
139 hv_state_static=self.new_hv_state,
140 disk_state_static=self.new_disk_state)
141
142 self.cfg.AddNodeGroup(group_obj, self.proc.GetECId(), check_uuid=False)
143 del self.remove_locks[locking.LEVEL_NODEGROUP]
144
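# A minimal sketch of the diskparams structure validated in
# LUGroupAdd.CheckPrereq above; parameter names and values are hypothetical,
# the accepted keys being defined by constants.DISK_DT_TYPES:
#
#   op.diskparams = {
#     constants.DT_DRBD8: {"metavg": "meta-vg"},
#     constants.DT_PLAIN: {"stripes": 2},
#   }
#
# Each per-template subdict is type-checked with utils.ForceDictType() and
# the resulting dict is then checked against constants.DISK_DT_DEFAULTS via
# utils.VerifyDictOptions(), which catches unknown parameter names.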
145
146 class LUGroupAssignNodes(NoHooksLU):
147 """Logical unit for assigning nodes to groups.
148
149 """
150 REQ_BGL = False
151
152 def ExpandNames(self):
153 # These raise errors.OpPrereqError on their own:
154 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
155 self.op.nodes = GetWantedNodes(self, self.op.nodes)
156
157 # We want to lock all the affected nodes and groups. We have readily
158 # available the list of nodes, and the *destination* group. To gather the
159 # list of "source" groups, we need to fetch node information later on.
160 self.needed_locks = {
161 locking.LEVEL_NODEGROUP: set([self.group_uuid]),
162 locking.LEVEL_NODE: self.op.nodes,
163 }
164
165 def DeclareLocks(self, level):
166 if level == locking.LEVEL_NODEGROUP:
167 assert len(self.needed_locks[locking.LEVEL_NODEGROUP]) == 1
168
169 # Try to get all affected nodes' groups without having the group or node
170 # lock yet. Needs verification later in the code flow.
171 groups = self.cfg.GetNodeGroupsFromNodes(self.op.nodes)
172
173 self.needed_locks[locking.LEVEL_NODEGROUP].update(groups)
174
175 def CheckPrereq(self):
176 """Check prerequisites.
177
178 """
179 assert self.needed_locks[locking.LEVEL_NODEGROUP]
180 assert (frozenset(self.owned_locks(locking.LEVEL_NODE)) ==
181 frozenset(self.op.nodes))
182
183 expected_locks = (set([self.group_uuid]) |
184 self.cfg.GetNodeGroupsFromNodes(self.op.nodes))
185 actual_locks = self.owned_locks(locking.LEVEL_NODEGROUP)
186 if actual_locks != expected_locks:
187 raise errors.OpExecError("Nodes changed groups since locks were acquired,"
188 " current groups are '%s', used to be '%s'" %
189 (utils.CommaJoin(expected_locks),
190 utils.CommaJoin(actual_locks)))
191
192 self.node_data = self.cfg.GetAllNodesInfo()
193 self.group = self.cfg.GetNodeGroup(self.group_uuid)
194 instance_data = self.cfg.GetAllInstancesInfo()
195
196 if self.group is None:
197 raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
198 (self.op.group_name, self.group_uuid))
199
200 (new_splits, previous_splits) = \
201 self.CheckAssignmentForSplitInstances([(node, self.group_uuid)
202 for node in self.op.nodes],
203 self.node_data, instance_data)
204
205 if new_splits:
206 fmt_new_splits = utils.CommaJoin(utils.NiceSort(new_splits))
207
208 if not self.op.force:
209 raise errors.OpExecError("The following instances get split by this"
210 " change and --force was not given: %s" %
211 fmt_new_splits)
212 else:
213 self.LogWarning("This operation will split the following instances: %s",
214 fmt_new_splits)
215
216 if previous_splits:
217 self.LogWarning("In addition, these already-split instances continue"
218 " to be split across groups: %s",
219 utils.CommaJoin(utils.NiceSort(previous_splits)))
220
221 def Exec(self, feedback_fn):
222 """Assign nodes to a new group.
223
224 """
225 mods = [(node_name, self.group_uuid) for node_name in self.op.nodes]
226
227 self.cfg.AssignGroupNodes(mods)
228
229 @staticmethod
230 def CheckAssignmentForSplitInstances(changes, node_data, instance_data):
231 """Check for split instances after a node assignment.
232
233 This method considers a series of node assignments as an atomic operation,
234 and returns information about split instances after applying the set of
235 changes.
236
237 In particular, it returns information about newly split instances, and
238 instances that were already split and remain so after the change.
239
240 Only instances whose disk template is listed in constants.DTS_INT_MIRROR are
241 considered.
242
243 @type changes: list of (node_name, new_group_uuid) pairs.
244 @param changes: list of node assignments to consider.
245 @param node_data: a dict with data for all nodes
246 @param instance_data: a dict with all instances to consider
247 @rtype: a two-tuple
248 @return: a list of instances that were previously okay and become split as
249 a consequence of this change, and a list of instances that were previously
250 split and that this change does not fix.
251
252 """
253 changed_nodes = dict((node, group) for node, group in changes
254 if node_data[node].group != group)
255
256 all_split_instances = set()
257 previously_split_instances = set()
258
259 def InstanceNodes(instance):
260 return [instance.primary_node] + list(instance.secondary_nodes)
261
262 for inst in instance_data.values():
263 if inst.disk_template not in constants.DTS_INT_MIRROR:
264 continue
265
266 instance_nodes = InstanceNodes(inst)
267
268 if len(set(node_data[node].group for node in instance_nodes)) > 1:
269 previously_split_instances.add(inst.name)
270
271 if len(set(changed_nodes.get(node, node_data[node].group)
272 for node in instance_nodes)) > 1:
273 all_split_instances.add(inst.name)
274
275 return (list(all_split_instances - previously_split_instances),
276 list(previously_split_instances & all_split_instances))
277
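# A worked example for CheckAssignmentForSplitInstances() above, using
# hypothetical names: suppose node_data places "node1" and "node2" in group
# "uuid-g1", and instance_data contains a DRBD instance "inst1" with primary
# "node1" and secondary "node2". Reassigning only one of its nodes splits it:
#
#   changes = [("node1", "uuid-g2")]
#   CheckAssignmentForSplitInstances(changes, node_data, instance_data)
#   => (["inst1"], [])    # newly split; nothing was split beforehand
#
# Had the instance already spanned two groups before the change (and the
# change not reunited it), its name would appear in the second list instead.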
278
279 class GroupQuery(QueryBase):
280 FIELDS = query.GROUP_FIELDS
281
282 def ExpandNames(self, lu):
283 lu.needed_locks = {}
284
285 self._all_groups = lu.cfg.GetAllNodeGroupsInfo()
286 self._cluster = lu.cfg.GetClusterInfo()
287 name_to_uuid = dict((g.name, g.uuid) for g in self._all_groups.values())
288
289 if not self.names:
290 self.wanted = [name_to_uuid[name]
291 for name in utils.NiceSort(name_to_uuid.keys())]
292 else:
293 # Accept entries that are either group names or UUIDs.
294 missing = []
295 self.wanted = []
296 all_uuid = frozenset(self._all_groups.keys())
297
298 for name in self.names:
299 if name in all_uuid:
300 self.wanted.append(name)
301 elif name in name_to_uuid:
302 self.wanted.append(name_to_uuid[name])
303 else:
304 missing.append(name)
305
306 if missing:
307 raise errors.OpPrereqError("Some groups do not exist: %s" %
308 utils.CommaJoin(missing),
309 errors.ECODE_NOENT)
310
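# A small sketch of the name resolution above, with hypothetical values:
#
#   self.names  = ["default", "2f1e6a...-uuid"]   # names and UUIDs accepted
#   self.wanted = ["<uuid of group 'default'>", "2f1e6a...-uuid"]
#
# Entries that are neither a known group name nor a known UUID are collected
# in "missing" and reported with a single OpPrereqError (ECODE_NOENT).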
311 def DeclareLocks(self, lu, level):
312 pass
313
314 def _GetQueryData(self, lu):
315 """Computes the list of node groups and their attributes.
316
317 """
318 do_nodes = query.GQ_NODE in self.requested_data
319 do_instances = query.GQ_INST in self.requested_data
320
321 group_to_nodes = None
322 group_to_instances = None
323
324 # For GQ_NODE we need to map group->[nodes], and for GQ_INST we need
325 # group->[instances]. The former can be built from GetAllNodesInfo() alone,
326 # but for the latter GetAllInstancesInfo() is not enough, because we have to
327 # go through instance->node. Hence, we need to process nodes even if we only
328 # need instance information.
329 if do_nodes or do_instances:
330 all_nodes = lu.cfg.GetAllNodesInfo()
331 group_to_nodes = dict((uuid, []) for uuid in self.wanted)
332 node_to_group = {}
333
334 for node in all_nodes.values():
335 if node.group in group_to_nodes:
336 group_to_nodes[node.group].append(node.name)
337 node_to_group[node.name] = node.group
338
339 if do_instances:
340 all_instances = lu.cfg.GetAllInstancesInfo()
341 group_to_instances = dict((uuid, []) for uuid in self.wanted)
342
343 for instance in all_instances.values():
344 node = instance.primary_node
345 if node in node_to_group:
346 group_to_instances[node_to_group[node]].append(instance.name)
347
348 if not do_nodes:
349 # Do not pass on node information if it was not requested.
350 group_to_nodes = None
351
352 return query.GroupQueryData(self._cluster,
353 [self._all_groups[uuid]
354 for uuid in self.wanted],
355 group_to_nodes, group_to_instances,
356 query.GQ_DISKPARAMS in self.requested_data)
357
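# A sketch of the mappings built by _GetQueryData() above, using hypothetical
# names:
#
#   group_to_nodes     = {"uuid-g1": ["node1", "node2"], "uuid-g2": []}
#   group_to_instances = {"uuid-g1": ["inst1"], "uuid-g2": []}
#
# Instances are attributed to the group of their primary node; when only
# GQ_INST was requested, group_to_nodes is reset to None before being passed
# to query.GroupQueryData.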
358
359 class LUGroupQuery(NoHooksLU):
360 """Logical unit for querying node groups.
361
362 """
363 REQ_BGL = False
364
365 def CheckArguments(self):
366 self.gq = GroupQuery(qlang.MakeSimpleFilter("name", self.op.names),
367 self.op.output_fields, False)
368
369 def ExpandNames(self):
370 self.gq.ExpandNames(self)
371
372 def DeclareLocks(self, level):
373 self.gq.DeclareLocks(self, level)
374
375 def Exec(self, feedback_fn):
376 return self.gq.OldStyleQuery(self)
377
378
379 class LUGroupSetParams(LogicalUnit):
380 """Modifies the parameters of a node group.
381
382 """
383 HPATH = "group-modify"
384 HTYPE = constants.HTYPE_GROUP
385 REQ_BGL = False
386
387 def CheckArguments(self):
388 all_changes = [
389 self.op.ndparams,
390 self.op.diskparams,
391 self.op.alloc_policy,
392 self.op.hv_state,
393 self.op.disk_state,
394 self.op.ipolicy,
395 ]
396
397 if all_changes.count(None) == len(all_changes):
398 raise errors.OpPrereqError("Please pass at least one modification",
399 errors.ECODE_INVAL)
400
401 def ExpandNames(self):
402 # This raises errors.OpPrereqError on its own:
403 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
404
405 self.needed_locks = {
406 locking.LEVEL_INSTANCE: [],
407 locking.LEVEL_NODEGROUP: [self.group_uuid],
408 }
409
410 self.share_locks[locking.LEVEL_INSTANCE] = 1
411
412 def DeclareLocks(self, level):
413 if level == locking.LEVEL_INSTANCE:
414 assert not self.needed_locks[locking.LEVEL_INSTANCE]
415
416 # Lock instances optimistically, needs verification once group lock has
417 # been acquired
418 self.needed_locks[locking.LEVEL_INSTANCE] = \
419 self.cfg.GetNodeGroupInstances(self.group_uuid)
420
421 @staticmethod
422 def _UpdateAndVerifyDiskParams(old, new):
423 """Updates and verifies disk parameters.
424
425 """
426 new_params = GetUpdatedParams(old, new)
427 utils.ForceDictType(new_params, constants.DISK_DT_TYPES)
428 return new_params
429
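# A minimal example of _UpdateAndVerifyDiskParams() above, with hypothetical
# parameter names and values:
#
#   old = {"resync-rate": 1024, "metavg": "xenvg"}
#   new = {"resync-rate": 2048}
#   _UpdateAndVerifyDiskParams(old, new)
#   => {"resync-rate": 2048, "metavg": "xenvg"}
#
# GetUpdatedParams() overlays the submitted values on the current subdict,
# and the merged result is type-checked against constants.DISK_DT_TYPES.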
430 def CheckPrereq(self):
431 """Check prerequisites.
432
433 """
434 owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
435
436 # Check if locked instances are still correct
437 CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
438
439 self.group = self.cfg.GetNodeGroup(self.group_uuid)
440 cluster = self.cfg.GetClusterInfo()
441
442 if self.group is None:
443 raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
444 (self.op.group_name, self.group_uuid))
445
446 if self.op.ndparams:
447 new_ndparams = GetUpdatedParams(self.group.ndparams, self.op.ndparams)
448 utils.ForceDictType(new_ndparams, constants.NDS_PARAMETER_TYPES)
449 self.new_ndparams = new_ndparams
450
451 if self.op.diskparams:
452 diskparams = self.group.diskparams
453 uavdp = self._UpdateAndVerifyDiskParams
454 # For each disk template subdict, update and verify the values
455 new_diskparams = dict((dt,
456 uavdp(diskparams.get(dt, {}),
457 self.op.diskparams[dt]))
458 for dt in constants.DISK_TEMPLATES
459 if dt in self.op.diskparams)
460 # Now that all subdicts of diskparams are ready, let's merge the actual
461 # dict with all updated subdicts
462 self.new_diskparams = objects.FillDict(diskparams, new_diskparams)
463 try:
464 utils.VerifyDictOptions(self.new_diskparams, constants.DISK_DT_DEFAULTS)
465 except errors.OpPrereqError, err:
466 raise errors.OpPrereqError("While verify diskparams options: %s" % err,
467 errors.ECODE_INVAL)
468
469 if self.op.hv_state:
470 self.new_hv_state = MergeAndVerifyHvState(self.op.hv_state,
471 self.group.hv_state_static)
472
473 if self.op.disk_state:
474 self.new_disk_state = \
475 MergeAndVerifyDiskState(self.op.disk_state,
476 self.group.disk_state_static)
477
478 if self.op.ipolicy:
479 self.new_ipolicy = GetUpdatedIPolicy(self.group.ipolicy,
480 self.op.ipolicy,
481 group_policy=True)
482
483 new_ipolicy = cluster.SimpleFillIPolicy(self.new_ipolicy)
484 inst_filter = lambda inst: inst.name in owned_instances
485 instances = self.cfg.GetInstancesInfoByFilter(inst_filter).values()
486 gmi = ganeti.masterd.instance
487 violations = \
488 ComputeNewInstanceViolations(gmi.CalculateGroupIPolicy(cluster,
489 self.group),
490 new_ipolicy, instances, self.cfg)
491
492 if violations:
493 self.LogWarning("After the ipolicy change the following instances"
494 " violate them: %s",
495 utils.CommaJoin(violations))
496
497 def BuildHooksEnv(self):
498 """Build hooks env.
499
500 """
501 return {
502 "GROUP_NAME": self.op.group_name,
503 "NEW_ALLOC_POLICY": self.op.alloc_policy,
504 }
505
506 def BuildHooksNodes(self):
507 """Build hooks nodes.
508
509 """
510 mn = self.cfg.GetMasterNode()
511 return ([mn], [mn])
512
513 def Exec(self, feedback_fn):
514 """Modifies the node group.
515
516 """
517 result = []
518
519 if self.op.ndparams:
520 self.group.ndparams = self.new_ndparams
521 result.append(("ndparams", str(self.group.ndparams)))
522
523 if self.op.diskparams:
524 self.group.diskparams = self.new_diskparams
525 result.append(("diskparams", str(self.group.diskparams)))
526
527 if self.op.alloc_policy:
528 self.group.alloc_policy = self.op.alloc_policy
529
530 if self.op.hv_state:
531 self.group.hv_state_static = self.new_hv_state
532
533 if self.op.disk_state:
534 self.group.disk_state_static = self.new_disk_state
535
536 if self.op.ipolicy:
537 self.group.ipolicy = self.new_ipolicy
538
539 self.cfg.Update(self.group, feedback_fn)
540 return result
541
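# A sketch of the value returned by LUGroupSetParams.Exec() above, with
# hypothetical parameter values: a list of (parameter, new value) pairs, one
# per applied modification, e.g.
#
#   [("ndparams", "{'exclusive_storage': True}"),
#    ("diskparams", "{'drbd8': {'resync-rate': 2048}}")]
#
# so the caller can report exactly what changed on the node group.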
542
543 class LUGroupRemove(LogicalUnit):
544 HPATH = "group-remove"
545 HTYPE = constants.HTYPE_GROUP
546 REQ_BGL = False
547
548 def ExpandNames(self):
549 # This raises errors.OpPrereqError on its own:
550 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
551 self.needed_locks = {
552 locking.LEVEL_NODEGROUP: [self.group_uuid],
553 }
554
555 def CheckPrereq(self):
556 """Check prerequisites.
557
558 This checks that the given group name exists as a node group, that it is
559 empty (i.e., contains no nodes), and that it is not the last group of the
560 cluster.
561
562 """
563 # Verify that the group is empty.
564 group_nodes = [node.name
565 for node in self.cfg.GetAllNodesInfo().values()
566 if node.group == self.group_uuid]
567
568 if group_nodes:
569 raise errors.OpPrereqError("Group '%s' not empty, has the following"
570 " nodes: %s" %
571 (self.op.group_name,
572 utils.CommaJoin(utils.NiceSort(group_nodes))),
573 errors.ECODE_STATE)
574
575 # Verify the cluster would not be left group-less.
576 if len(self.cfg.GetNodeGroupList()) == 1:
577 raise errors.OpPrereqError("Group '%s' is the only group, cannot be"
578 " removed" % self.op.group_name,
579 errors.ECODE_STATE)
580
581 def BuildHooksEnv(self):
582 """Build hooks env.
583
584 """
585 return {
586 "GROUP_NAME": self.op.group_name,
587 }
588
589 def BuildHooksNodes(self):
590 """Build hooks nodes.
591
592 """
593 mn = self.cfg.GetMasterNode()
594 return ([mn], [mn])
595
596 def Exec(self, feedback_fn):
597 """Remove the node group.
598
599 """
600 try:
601 self.cfg.RemoveNodeGroup(self.group_uuid)
602 except errors.ConfigurationError:
603 raise errors.OpExecError("Group '%s' with UUID %s disappeared" %
604 (self.op.group_name, self.group_uuid))
605
606 self.remove_locks[locking.LEVEL_NODEGROUP] = self.group_uuid
607
608
609 class LUGroupRename(LogicalUnit):
610 HPATH = "group-rename"
611 HTYPE = constants.HTYPE_GROUP
612 REQ_BGL = False
613
614 def ExpandNames(self):
615 # This raises errors.OpPrereqError on its own:
616 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
617
618 self.needed_locks = {
619 locking.LEVEL_NODEGROUP: [self.group_uuid],
620 }
621
622 def CheckPrereq(self):
623 """Check prerequisites.
624
625 Ensures the requested new name is not yet used.
626
627 """
628 try:
629 new_name_uuid = self.cfg.LookupNodeGroup(self.op.new_name)
630 except errors.OpPrereqError:
631 pass
632 else:
633 raise errors.OpPrereqError("Desired new name '%s' clashes with existing"
634 " node group (UUID: %s)" %
635 (self.op.new_name, new_name_uuid),
636 errors.ECODE_EXISTS)
637
638 def BuildHooksEnv(self):
639 """Build hooks env.
640
641 """
642 return {
643 "OLD_NAME": self.op.group_name,
644 "NEW_NAME": self.op.new_name,
645 }
646
647 def BuildHooksNodes(self):
648 """Build hooks nodes.
649
650 """
651 mn = self.cfg.GetMasterNode()
652
653 all_nodes = self.cfg.GetAllNodesInfo()
654 all_nodes.pop(mn, None)
655
656 run_nodes = [mn]
657 run_nodes.extend(node.name for node in all_nodes.values()
658 if node.group == self.group_uuid)
659
660 return (run_nodes, run_nodes)
661
662 def Exec(self, feedback_fn):
663 """Rename the node group.
664
665 """
666 group = self.cfg.GetNodeGroup(self.group_uuid)
667
668 if group is None:
669 raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
670 (self.op.group_name, self.group_uuid))
671
672 group.name = self.op.new_name
673 self.cfg.Update(group, feedback_fn)
674
675 return self.op.new_name
676
677
678 class LUGroupEvacuate(LogicalUnit):
679 HPATH = "group-evacuate"
680 HTYPE = constants.HTYPE_GROUP
681 REQ_BGL = False
682
683 def ExpandNames(self):
684 # This raises errors.OpPrereqError on its own:
685 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
686
687 if self.op.target_groups:
688 self.req_target_uuids = map(self.cfg.LookupNodeGroup,
689 self.op.target_groups)
690 else:
691 self.req_target_uuids = []
692
693 if self.group_uuid in self.req_target_uuids:
694 raise errors.OpPrereqError("Group to be evacuated (%s) can not be used"
695 " as a target group (targets are %s)" %
696 (self.group_uuid,
697 utils.CommaJoin(self.req_target_uuids)),
698 errors.ECODE_INVAL)
699
700 self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)
701
702 self.share_locks = ShareAll()
703 self.needed_locks = {
704 locking.LEVEL_INSTANCE: [],
705 locking.LEVEL_NODEGROUP: [],
706 locking.LEVEL_NODE: [],
707 }
708
709 def DeclareLocks(self, level):
710 if level == locking.LEVEL_INSTANCE:
711 assert not self.needed_locks[locking.LEVEL_INSTANCE]
712
713 # Lock instances optimistically, needs verification once node and group
714 # locks have been acquired
715 self.needed_locks[locking.LEVEL_INSTANCE] = \
716 self.cfg.GetNodeGroupInstances(self.group_uuid)
717
718 elif level == locking.LEVEL_NODEGROUP:
719 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
720
721 if self.req_target_uuids:
722 lock_groups = set([self.group_uuid] + self.req_target_uuids)
723
724 # Lock all groups used by instances optimistically; this requires going
725 # via the node before it's locked, requiring verification later on
726 lock_groups.update(group_uuid
727 for instance_name in
728 self.owned_locks(locking.LEVEL_INSTANCE)
729 for group_uuid in
730 self.cfg.GetInstanceNodeGroups(instance_name))
731 else:
732 # No target groups, need to lock all of them
733 lock_groups = locking.ALL_SET
734
735 self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups
736
737 elif level == locking.LEVEL_NODE:
738 # This will only lock the nodes in the group to be evacuated which
739 # contain actual instances
740 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
741 self._LockInstancesNodes()
742
743 # Lock all nodes in group to be evacuated and target groups
744 owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
745 assert self.group_uuid in owned_groups
746 member_nodes = [node_name
747 for group in owned_groups
748 for node_name in self.cfg.GetNodeGroup(group).members]
749 self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
750
751 def CheckPrereq(self):
752 owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
753 owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
754 owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
755
756 assert owned_groups.issuperset(self.req_target_uuids)
757 assert self.group_uuid in owned_groups
758
759 # Check if locked instances are still correct
760 CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
761
762 # Get instance information
763 self.instances = dict(self.cfg.GetMultiInstanceInfo(owned_instances))
764
765 # Check if node groups for locked instances are still correct
766 CheckInstancesNodeGroups(self.cfg, self.instances,
767 owned_groups, owned_nodes, self.group_uuid)
768
769 if self.req_target_uuids:
770 # User requested specific target groups
771 self.target_uuids = self.req_target_uuids
772 else:
773 # All groups except the one to be evacuated are potential targets
774 self.target_uuids = [group_uuid for group_uuid in owned_groups
775 if group_uuid != self.group_uuid]
776
777 if not self.target_uuids:
778 raise errors.OpPrereqError("There are no possible target groups",
779 errors.ECODE_INVAL)
780
781 def BuildHooksEnv(self):
782 """Build hooks env.
783
784 """
785 return {
786 "GROUP_NAME": self.op.group_name,
787 "TARGET_GROUPS": " ".join(self.target_uuids),
788 }
789
790 def BuildHooksNodes(self):
791 """Build hooks nodes.
792
793 """
794 mn = self.cfg.GetMasterNode()
795
796 assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
797
798 run_nodes = [mn] + self.cfg.GetNodeGroup(self.group_uuid).members
799
800 return (run_nodes, run_nodes)
801
802 def Exec(self, feedback_fn):
803 instances = list(self.owned_locks(locking.LEVEL_INSTANCE))
804
805 assert self.group_uuid not in self.target_uuids
806
807 req = iallocator.IAReqGroupChange(instances=instances,
808 target_groups=self.target_uuids)
809 ial = iallocator.IAllocator(self.cfg, self.rpc, req)
810
811 ial.Run(self.op.iallocator)
812
813 if not ial.success:
814 raise errors.OpPrereqError("Can't compute group evacuation using"
815 " iallocator '%s': %s" %
816 (self.op.iallocator, ial.info),
817 errors.ECODE_NORES)
818
819 jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)
820
821 self.LogInfo("Iallocator returned %s job(s) for evacuating node group %s",
822 len(jobs), self.op.group_name)
823
824 return ResultWithJobs(jobs)
825
826
827 class LUGroupVerifyDisks(NoHooksLU):
828 """Verifies the status of all disks in a node group.
829
830 """
831 REQ_BGL = False
832
833 def ExpandNames(self):
834 # Raises errors.OpPrereqError on its own if group can't be found
835 self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
836
837 self.share_locks = ShareAll()
838 self.needed_locks = {
839 locking.LEVEL_INSTANCE: [],
840 locking.LEVEL_NODEGROUP: [],
841 locking.LEVEL_NODE: [],
842
843 # This opcode acquires all node locks in a group. LUClusterVerifyDisks
844 # starts one instance of this opcode for every group, which means all
845 # nodes will be locked for a short amount of time, so it's better to
846 # acquire the node allocation lock as well.
847 locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
848 }
849
850 def DeclareLocks(self, level):
851 if level == locking.LEVEL_INSTANCE:
852 assert not self.needed_locks[locking.LEVEL_INSTANCE]
853
854 # Lock instances optimistically, needs verification once node and group
855 # locks have been acquired
856 self.needed_locks[locking.LEVEL_INSTANCE] = \
857 self.cfg.GetNodeGroupInstances(self.group_uuid)
858
859 elif level == locking.LEVEL_NODEGROUP:
860 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
861
862 self.needed_locks[locking.LEVEL_NODEGROUP] = \
863 set([self.group_uuid] +
864 # Lock all groups used by instances optimistically; this requires
865 # going via the node before it's locked, requiring verification
866 # later on
867 [group_uuid
868 for instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
869 for group_uuid in self.cfg.GetInstanceNodeGroups(instance_name)])
870
871 elif level == locking.LEVEL_NODE:
872 # This will only lock the nodes in the group to be verified which contain
873 # actual instances
874 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
875 self._LockInstancesNodes()
876
877 # Lock all nodes in group to be verified
878 assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
879 member_nodes = self.cfg.GetNodeGroup(self.group_uuid).members
880 self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
881
882 def CheckPrereq(self):
883 owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
884 owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
885 owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
886
887 assert self.group_uuid in owned_groups
888
889 # Check if locked instances are still correct
890 CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
891
892 # Get instance information
893 self.instances = dict(self.cfg.GetMultiInstanceInfo(owned_instances))
894
895 # Check if node groups for locked instances are still correct
896 CheckInstancesNodeGroups(self.cfg, self.instances,
897 owned_groups, owned_nodes, self.group_uuid)
898
899 def Exec(self, feedback_fn):
900 """Verify integrity of cluster disks.
901
902 @rtype: tuple of three items
903 @return: a tuple of (dict of node-to-node_error, list of instances
904 which need activate-disks, dict of instance: (node, volume) for
905 missing volumes)
906
907 """
908 res_nodes = {}
909 res_instances = set()
910 res_missing = {}
911
912 nv_dict = MapInstanceDisksToNodes(
913 [inst for inst in self.instances.values()
914 if inst.admin_state == constants.ADMINST_UP])
915
916 if nv_dict:
917 nodes = utils.NiceSort(set(self.owned_locks(locking.LEVEL_NODE)) &
918 set(self.cfg.GetVmCapableNodeList()))
919
920 node_lvs = self.rpc.call_lv_list(nodes, [])
921
922 for (node, node_res) in node_lvs.items():
923 if node_res.offline:
924 continue
925
926 msg = node_res.fail_msg
927 if msg:
928 logging.warning("Error enumerating LVs on node %s: %s", node, msg)
929 res_nodes[node] = msg
930 continue
931
932 for lv_name, (_, _, lv_online) in node_res.payload.items():
933 inst = nv_dict.pop((node, lv_name), None)
934 if not (lv_online or inst is None):
935 res_instances.add(inst)
936
937 # any leftover items in nv_dict are missing LVs, let's arrange the data
938 # better
939 for key, inst in nv_dict.iteritems():
940 res_missing.setdefault(inst, []).append(list(key))
941
942 return (res_nodes, list(res_instances), res_missing)
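# A sketch of the tuple returned by Exec() above, with hypothetical names:
#
#   ({"node3": "Error while getting the LV list: ..."},   # per-node errors
#    ["inst2"],                                           # need activate-disks
#    {"inst4": [["node5", "xenvg/disk0"]]})               # missing volumes
#
# i.e. nodes whose LV enumeration failed, instances with offline logical
# volumes, and instances whose volumes could not be found at all.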