28370d90a8634733a36fda47b1a771e557158aad
[ganeti-github.git] / lib / cmdlib / cluster / __init__.py
1 #
2 #
3
4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc.
5 # All rights reserved.
6 #
7 # Redistribution and use in source and binary forms, with or without
8 # modification, are permitted provided that the following conditions are
9 # met:
10 #
11 # 1. Redistributions of source code must retain the above copyright notice,
12 # this list of conditions and the following disclaimer.
13 #
14 # 2. Redistributions in binary form must reproduce the above copyright
15 # notice, this list of conditions and the following disclaimer in the
16 # documentation and/or other materials provided with the distribution.
17 #
18 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
19 # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
20 # TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
21 # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
22 # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
23 # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
24 # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
25 # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
26 # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
27 # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28 # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
30
31 """Logical units dealing with the cluster."""
32
33 import copy
34 import itertools
35 import logging
36 import operator
37 import os
38 import re
39 import time
40
41 from ganeti import compat
42 from ganeti import constants
43 from ganeti import errors
44 from ganeti import hypervisor
45 from ganeti import locking
46 from ganeti import masterd
47 from ganeti import netutils
48 from ganeti import objects
49 from ganeti import opcodes
50 from ganeti import pathutils
51 from ganeti import query
52 import ganeti.rpc.node as rpc
53 from ganeti import runtime
54 from ganeti import ssh
55 from ganeti import uidpool
56 from ganeti import utils
57 from ganeti import vcluster
58
59 from ganeti.cmdlib.base import NoHooksLU, QueryBase, LogicalUnit, \
60 ResultWithJobs
61 from ganeti.cmdlib.common import ShareAll, RunPostHook, \
62 ComputeAncillaryFiles, RedistributeAncillaryFiles, UploadHelper, \
63 GetWantedInstances, MergeAndVerifyHvState, MergeAndVerifyDiskState, \
64 GetUpdatedIPolicy, ComputeNewInstanceViolations, GetUpdatedParams, \
65 CheckOSParams, CheckHVParams, AdjustCandidatePool, CheckNodePVs, \
66 ComputeIPolicyInstanceViolation, AnnotateDiskParams, SupportsOob, \
67 CheckIpolicyVsDiskTemplates, CheckDiskAccessModeValidity, \
68 CheckDiskAccessModeConsistency, GetClientCertDigest, \
69 AddInstanceCommunicationNetworkOp, ConnectInstanceCommunicationNetworkOp, \
70 CheckImageValidity, EnsureKvmdOnNodes
71
72 import ganeti.masterd.instance
73
74
75 class LUClusterRenewCrypto(NoHooksLU):
76 """Renew the cluster's crypto tokens.
77
78 """
79
80 _MAX_NUM_RETRIES = 3
81 REQ_BGL = False
82
83 def ExpandNames(self):
84 self.needed_locks = {
85 locking.LEVEL_NODE: locking.ALL_SET,
86 }
87 self.share_locks = ShareAll()
88 self.share_locks[locking.LEVEL_NODE] = 0
89
90 def CheckPrereq(self):
91 """Check prerequisites.
92
93 Notably the compatibility of specified key bits and key type.
94
95 """
96 cluster_info = self.cfg.GetClusterInfo()
97
98 self.ssh_key_type = self.op.ssh_key_type
99 if self.ssh_key_type is None:
100 self.ssh_key_type = cluster_info.ssh_key_type
101
102 self.ssh_key_bits = ssh.DetermineKeyBits(self.ssh_key_type,
103 self.op.ssh_key_bits,
104 cluster_info.ssh_key_type,
105 cluster_info.ssh_key_bits)
106
107 def _RenewNodeSslCertificates(self, feedback_fn):
108 """Renews the nodes' SSL certificates.
109
110 Note that most of this operation is done in gnt_cluster.py, this LU only
111 takes care of the renewal of the client SSL certificates.
112
113 """
114 master_uuid = self.cfg.GetMasterNode()
115 cluster = self.cfg.GetClusterInfo()
116
117 logging.debug("Renewing the master's SSL node certificate."
118 " Master's UUID: %s.", master_uuid)
119
120 # mapping node UUIDs to client certificate digests
121 digest_map = {}
122 master_digest = utils.GetCertificateDigest(
123 cert_filename=pathutils.NODED_CLIENT_CERT_FILE)
124 digest_map[master_uuid] = master_digest
125 logging.debug("Adding the master's SSL node certificate digest to the"
126 " configuration. Master's UUID: %s, Digest: %s",
127 master_uuid, master_digest)
128
129 node_errors = {}
130 nodes = self.cfg.GetAllNodesInfo()
131 logging.debug("Renewing non-master nodes' node certificates.")
132 for (node_uuid, node_info) in nodes.items():
133 if node_info.offline:
134 logging.info("* Skipping offline node %s", node_info.name)
135 continue
136 if node_uuid != master_uuid:
137 logging.debug("Adding certificate digest of node '%s'.", node_uuid)
138 last_exception = None
139 for i in range(self._MAX_NUM_RETRIES):
140 try:
141 if node_info.master_candidate:
142 node_digest = GetClientCertDigest(self, node_uuid)
143 digest_map[node_uuid] = node_digest
144 logging.debug("Added the node's certificate to candidate"
145 " certificate list. Current list: %s.",
146 str(cluster.candidate_certs))
147 break
148 except errors.OpExecError as e:
149 last_exception = e
150 logging.error("Could not fetch a non-master node's SSL node"
151 " certificate at attempt no. %s. The node's UUID"
152 " is %s, and the error was: %s.",
153 str(i), node_uuid, e)
154 else:
155 if last_exception:
156 node_errors[node_uuid] = last_exception
157
158 if node_errors:
159 msg = ("Some nodes' SSL client certificates could not be fetched."
160 " Please make sure those nodes are reachable and rerun"
161 " the operation. The affected nodes and their errors are:\n")
162 for uuid, e in node_errors.items():
163 msg += "Node %s: %s\n" % (uuid, e)
164 feedback_fn(msg)
165
166 self.cfg.SetCandidateCerts(digest_map)
167
168 def _RenewSshKeys(self, feedback_fn):
169 """Renew all nodes' SSH keys.
170
171 @type feedback_fn: function
172 @param feedback_fn: logging function, see L{ganeti.cmdlist.base.LogicalUnit}
173
174 """
175 master_uuid = self.cfg.GetMasterNode()
176
177 nodes = self.cfg.GetAllNodesInfo()
178 nodes_uuid_names = [(node_uuid, node_info.name) for (node_uuid, node_info)
179 in nodes.items() if not node_info.offline]
180 node_names = [name for (_, name) in nodes_uuid_names]
181 node_uuids = [uuid for (uuid, _) in nodes_uuid_names]
182 potential_master_candidates = self.cfg.GetPotentialMasterCandidates()
183 master_candidate_uuids = self.cfg.GetMasterCandidateUuids()
184
185 cluster_info = self.cfg.GetClusterInfo()
186
187 result = self.rpc.call_node_ssh_keys_renew(
188 [master_uuid],
189 node_uuids, node_names,
190 master_candidate_uuids,
191 potential_master_candidates,
192 cluster_info.ssh_key_type, # Old key type
193 self.ssh_key_type, # New key type
194 self.ssh_key_bits, # New key bits
195 self.op.debug,
196 self.op.verbose)
197 result[master_uuid].Raise("Could not renew the SSH keys of all nodes")
198
199 # After the keys have been successfully swapped, time to commit the change
200 # in key type
201 cluster_info.ssh_key_type = self.ssh_key_type
202 cluster_info.ssh_key_bits = self.ssh_key_bits
203 self.cfg.Update(cluster_info, feedback_fn)
204
205 def Exec(self, feedback_fn):
206 if self.op.node_certificates:
207 feedback_fn("Renewing Node SSL certificates")
208 self._RenewNodeSslCertificates(feedback_fn)
209
210 if self.op.renew_ssh_keys:
211 if self.cfg.GetClusterInfo().modify_ssh_setup:
212 feedback_fn("Renewing SSH keys")
213 self._RenewSshKeys(feedback_fn)
214 else:
215 feedback_fn("Cannot renew SSH keys if the cluster is configured to not"
216 " modify the SSH setup.")
217
218
219 class LUClusterActivateMasterIp(NoHooksLU):
220 """Activate the master IP on the master node.
221
222 """
223 def Exec(self, feedback_fn):
224 """Activate the master IP.
225
226 """
227 master_params = self.cfg.GetMasterNetworkParameters()
228 ems = self.cfg.GetUseExternalMipScript()
229 result = self.rpc.call_node_activate_master_ip(master_params.uuid,
230 master_params, ems)
231 result.Raise("Could not activate the master IP")
232
233
234 class LUClusterDeactivateMasterIp(NoHooksLU):
235 """Deactivate the master IP on the master node.
236
237 """
238 def Exec(self, feedback_fn):
239 """Deactivate the master IP.
240
241 """
242 master_params = self.cfg.GetMasterNetworkParameters()
243 ems = self.cfg.GetUseExternalMipScript()
244 result = self.rpc.call_node_deactivate_master_ip(master_params.uuid,
245 master_params, ems)
246 result.Raise("Could not deactivate the master IP")
247
248
249 class LUClusterConfigQuery(NoHooksLU):
250 """Return configuration values.
251
252 """
253 REQ_BGL = False
254
255 def CheckArguments(self):
256 self.cq = ClusterQuery(None, self.op.output_fields, False)
257
258 def ExpandNames(self):
259 self.cq.ExpandNames(self)
260
261 def DeclareLocks(self, level):
262 self.cq.DeclareLocks(self, level)
263
264 def Exec(self, feedback_fn):
265 result = self.cq.OldStyleQuery(self)
266
267 assert len(result) == 1
268
269 return result[0]
270
271
272 class LUClusterDestroy(LogicalUnit):
273 """Logical unit for destroying the cluster.
274
275 """
276 HPATH = "cluster-destroy"
277 HTYPE = constants.HTYPE_CLUSTER
278
279 # Read by the job queue to detect when the cluster is gone and job files will
280 # never be available.
281 # FIXME: This variable should be removed together with the Python job queue.
282 clusterHasBeenDestroyed = False
283
284 def BuildHooksEnv(self):
285 """Build hooks env.
286
287 """
288 return {
289 "OP_TARGET": self.cfg.GetClusterName(),
290 }
291
292 def BuildHooksNodes(self):
293 """Build hooks nodes.
294
295 """
296 return ([], [])
297
298 def CheckPrereq(self):
299 """Check prerequisites.
300
301 This checks whether the cluster is empty.
302
303 Any errors are signaled by raising errors.OpPrereqError.
304
305 """
306 master = self.cfg.GetMasterNode()
307
308 nodelist = self.cfg.GetNodeList()
309 if len(nodelist) != 1 or nodelist[0] != master:
310 raise errors.OpPrereqError("There are still %d node(s) in"
311 " this cluster." % (len(nodelist) - 1),
312 errors.ECODE_INVAL)
313 instancelist = self.cfg.GetInstanceList()
314 if instancelist:
315 raise errors.OpPrereqError("There are still %d instance(s) in"
316 " this cluster." % len(instancelist),
317 errors.ECODE_INVAL)
318
319 def Exec(self, feedback_fn):
320 """Destroys the cluster.
321
322 """
323 master_params = self.cfg.GetMasterNetworkParameters()
324
325 # Run post hooks on master node before it's removed
326 RunPostHook(self, master_params.uuid)
327
328 ems = self.cfg.GetUseExternalMipScript()
329 result = self.rpc.call_node_deactivate_master_ip(master_params.uuid,
330 master_params, ems)
331 result.Warn("Error disabling the master IP address", self.LogWarning)
332
333 self.wconfd.Client().PrepareClusterDestruction(self.wconfdcontext)
334
335 # signal to the job queue that the cluster is gone
336 LUClusterDestroy.clusterHasBeenDestroyed = True
337
338 return master_params.uuid
339
340
341 class LUClusterPostInit(LogicalUnit):
342 """Logical unit for running hooks after cluster initialization.
343
344 """
345 HPATH = "cluster-init"
346 HTYPE = constants.HTYPE_CLUSTER
347
348 def CheckArguments(self):
349 self.master_uuid = self.cfg.GetMasterNode()
350 self.master_ndparams = self.cfg.GetNdParams(self.cfg.GetMasterNodeInfo())
351
352 # TODO: When Issue 584 is solved, and None is properly parsed when used
353 # as a default value, ndparams.get(.., None) can be changed to
354 # ndparams[..] to access the values directly
355
356 # OpenvSwitch: Warn user if link is missing
357 if (self.master_ndparams[constants.ND_OVS] and not
358 self.master_ndparams.get(constants.ND_OVS_LINK, None)):
359 self.LogInfo("No physical interface for OpenvSwitch was given."
360 " OpenvSwitch will not have an outside connection. This"
361 " might not be what you want.")
362
363 def BuildHooksEnv(self):
364 """Build hooks env.
365
366 """
367 return {
368 "OP_TARGET": self.cfg.GetClusterName(),
369 }
370
371 def BuildHooksNodes(self):
372 """Build hooks nodes.
373
374 """
375 return ([], [self.cfg.GetMasterNode()])
376
377 def Exec(self, feedback_fn):
378 """Create and configure Open vSwitch
379
380 """
381 if self.master_ndparams[constants.ND_OVS]:
382 result = self.rpc.call_node_configure_ovs(
383 self.master_uuid,
384 self.master_ndparams[constants.ND_OVS_NAME],
385 self.master_ndparams.get(constants.ND_OVS_LINK, None))
386 result.Raise("Could not successully configure Open vSwitch")
387
388 return True
389
390
391 class ClusterQuery(QueryBase):
392 FIELDS = query.CLUSTER_FIELDS
393
394 #: Do not sort (there is only one item)
395 SORT_FIELD = None
396
397 def ExpandNames(self, lu):
398 lu.needed_locks = {}
399
400 # The following variables interact with _QueryBase._GetNames
401 self.wanted = locking.ALL_SET
402 self.do_locking = self.use_locking
403
404 if self.do_locking:
405 raise errors.OpPrereqError("Can not use locking for cluster queries",
406 errors.ECODE_INVAL)
407
408 def DeclareLocks(self, lu, level):
409 pass
410
411 def _GetQueryData(self, lu):
412 """Computes the list of nodes and their attributes.
413
414 """
415 if query.CQ_CONFIG in self.requested_data:
416 cluster = lu.cfg.GetClusterInfo()
417 nodes = lu.cfg.GetAllNodesInfo()
418 else:
419 cluster = NotImplemented
420 nodes = NotImplemented
421
422 if query.CQ_QUEUE_DRAINED in self.requested_data:
423 drain_flag = os.path.exists(pathutils.JOB_QUEUE_DRAIN_FILE)
424 else:
425 drain_flag = NotImplemented
426
427 if query.CQ_WATCHER_PAUSE in self.requested_data:
428 master_node_uuid = lu.cfg.GetMasterNode()
429
430 result = lu.rpc.call_get_watcher_pause(master_node_uuid)
431 result.Raise("Can't retrieve watcher pause from master node '%s'" %
432 lu.cfg.GetMasterNodeName())
433
434 watcher_pause = result.payload
435 else:
436 watcher_pause = NotImplemented
437
438 return query.ClusterQueryData(cluster, nodes, drain_flag, watcher_pause)
439
440
441 class LUClusterQuery(NoHooksLU):
442 """Query cluster configuration.
443
444 """
445 REQ_BGL = False
446
447 def ExpandNames(self):
448 self.needed_locks = {}
449
450 def Exec(self, feedback_fn):
451 """Return cluster config.
452
453 """
454 cluster = self.cfg.GetClusterInfo()
455 os_hvp = {}
456
457 # Filter just for enabled hypervisors
458 for os_name, hv_dict in cluster.os_hvp.items():
459 os_hvp[os_name] = {}
460 for hv_name, hv_params in hv_dict.items():
461 if hv_name in cluster.enabled_hypervisors:
462 os_hvp[os_name][hv_name] = hv_params
463
464 # Convert ip_family to ip_version
465 primary_ip_version = constants.IP4_VERSION
466 if cluster.primary_ip_family == netutils.IP6Address.family:
467 primary_ip_version = constants.IP6_VERSION
468
469 result = {
470 "software_version": constants.RELEASE_VERSION,
471 "protocol_version": constants.PROTOCOL_VERSION,
472 "config_version": constants.CONFIG_VERSION,
473 "os_api_version": max(constants.OS_API_VERSIONS),
474 "export_version": constants.EXPORT_VERSION,
475 "vcs_version": constants.VCS_VERSION,
476 "architecture": runtime.GetArchInfo(),
477 "name": cluster.cluster_name,
478 "master": self.cfg.GetMasterNodeName(),
479 "default_hypervisor": cluster.primary_hypervisor,
480 "enabled_hypervisors": cluster.enabled_hypervisors,
481 "hvparams": dict([(hypervisor_name, cluster.hvparams[hypervisor_name])
482 for hypervisor_name in cluster.enabled_hypervisors]),
483 "os_hvp": os_hvp,
484 "beparams": cluster.beparams,
485 "osparams": cluster.osparams,
486 "ipolicy": cluster.ipolicy,
487 "nicparams": cluster.nicparams,
488 "ndparams": cluster.ndparams,
489 "diskparams": cluster.diskparams,
490 "candidate_pool_size": cluster.candidate_pool_size,
491 "max_running_jobs": cluster.max_running_jobs,
492 "max_tracked_jobs": cluster.max_tracked_jobs,
493 "mac_prefix": cluster.mac_prefix,
494 "master_netdev": cluster.master_netdev,
495 "master_netmask": cluster.master_netmask,
496 "use_external_mip_script": cluster.use_external_mip_script,
497 "volume_group_name": cluster.volume_group_name,
498 "drbd_usermode_helper": cluster.drbd_usermode_helper,
499 "file_storage_dir": cluster.file_storage_dir,
500 "shared_file_storage_dir": cluster.shared_file_storage_dir,
501 "maintain_node_health": cluster.maintain_node_health,
502 "ctime": cluster.ctime,
503 "mtime": cluster.mtime,
504 "uuid": cluster.uuid,
505 "tags": list(cluster.GetTags()),
506 "uid_pool": cluster.uid_pool,
507 "default_iallocator": cluster.default_iallocator,
508 "default_iallocator_params": cluster.default_iallocator_params,
509 "reserved_lvs": cluster.reserved_lvs,
510 "primary_ip_version": primary_ip_version,
511 "prealloc_wipe_disks": cluster.prealloc_wipe_disks,
512 "hidden_os": cluster.hidden_os,
513 "blacklisted_os": cluster.blacklisted_os,
514 "enabled_disk_templates": cluster.enabled_disk_templates,
515 "install_image": cluster.install_image,
516 "instance_communication_network": cluster.instance_communication_network,
517 "compression_tools": cluster.compression_tools,
518 "enabled_user_shutdown": cluster.enabled_user_shutdown,
519 }
520
521 return result
522
523
524 class LUClusterRedistConf(NoHooksLU):
525 """Force the redistribution of cluster configuration.
526
527 This is a very simple LU.
528
529 """
530 REQ_BGL = False
531
532 def ExpandNames(self):
533 self.needed_locks = {
534 locking.LEVEL_NODE: locking.ALL_SET,
535 }
536 self.share_locks = ShareAll()
537
538 def Exec(self, feedback_fn):
539 """Redistribute the configuration.
540
541 """
542 self.cfg.Update(self.cfg.GetClusterInfo(), feedback_fn)
543 RedistributeAncillaryFiles(self)
544
545
546 class LUClusterRename(LogicalUnit):
547 """Rename the cluster.
548
549 """
550 HPATH = "cluster-rename"
551 HTYPE = constants.HTYPE_CLUSTER
552
553 def BuildHooksEnv(self):
554 """Build hooks env.
555
556 """
557 return {
558 "OP_TARGET": self.cfg.GetClusterName(),
559 "NEW_NAME": self.op.name,
560 }
561
562 def BuildHooksNodes(self):
563 """Build hooks nodes.
564
565 """
566 return ([self.cfg.GetMasterNode()], self.cfg.GetNodeList())
567
568 def CheckPrereq(self):
569 """Verify that the passed name is a valid one.
570
571 """
572 hostname = netutils.GetHostname(name=self.op.name,
573 family=self.cfg.GetPrimaryIPFamily())
574
575 new_name = hostname.name
576 self.ip = new_ip = hostname.ip
577 old_name = self.cfg.GetClusterName()
578 old_ip = self.cfg.GetMasterIP()
579 if new_name == old_name and new_ip == old_ip:
580 raise errors.OpPrereqError("Neither the name nor the IP address of the"
581 " cluster has changed",
582 errors.ECODE_INVAL)
583 if new_ip != old_ip:
584 if netutils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
585 raise errors.OpPrereqError("The given cluster IP address (%s) is"
586 " reachable on the network" %
587 new_ip, errors.ECODE_NOTUNIQUE)
588
589 self.op.name = new_name
590
591 def Exec(self, feedback_fn):
592 """Rename the cluster.
593
594 """
595 clustername = self.op.name
596 new_ip = self.ip
597
598 # shutdown the master IP
599 master_params = self.cfg.GetMasterNetworkParameters()
600 ems = self.cfg.GetUseExternalMipScript()
601 result = self.rpc.call_node_deactivate_master_ip(master_params.uuid,
602 master_params, ems)
603 result.Raise("Could not disable the master role")
604
605 try:
606 cluster = self.cfg.GetClusterInfo()
607 cluster.cluster_name = clustername
608 cluster.master_ip = new_ip
609 self.cfg.Update(cluster, feedback_fn)
610
611 # update the known hosts file
612 ssh.WriteKnownHostsFile(self.cfg, pathutils.SSH_KNOWN_HOSTS_FILE)
613 node_list = self.cfg.GetOnlineNodeList()
614 try:
615 node_list.remove(master_params.uuid)
616 except ValueError:
617 pass
618 UploadHelper(self, node_list, pathutils.SSH_KNOWN_HOSTS_FILE)
619 finally:
620 master_params.ip = new_ip
621 result = self.rpc.call_node_activate_master_ip(master_params.uuid,
622 master_params, ems)
623 result.Warn("Could not re-enable the master role on the master,"
624 " please restart manually", self.LogWarning)
625
626 return clustername
627
628
629 class LUClusterRepairDiskSizes(NoHooksLU):
630 """Verifies the cluster disks sizes.
631
632 """
633 REQ_BGL = False
634
635 def ExpandNames(self):
636 if self.op.instances:
637 (_, self.wanted_names) = GetWantedInstances(self, self.op.instances)
638 # Not getting the node allocation lock as only a specific set of
639 # instances (and their nodes) is going to be acquired
640 self.needed_locks = {
641 locking.LEVEL_NODE_RES: [],
642 locking.LEVEL_INSTANCE: self.wanted_names,
643 }
644 self.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE
645 else:
646 self.wanted_names = None
647 self.needed_locks = {
648 locking.LEVEL_NODE_RES: locking.ALL_SET,
649 locking.LEVEL_INSTANCE: locking.ALL_SET,
650 }
651
652 self.share_locks = {
653 locking.LEVEL_NODE_RES: 1,
654 locking.LEVEL_INSTANCE: 0,
655 }
656
657 def DeclareLocks(self, level):
658 if level == locking.LEVEL_NODE_RES and self.wanted_names is not None:
659 self._LockInstancesNodes(primary_only=True, level=level)
660
661 def CheckPrereq(self):
662 """Check prerequisites.
663
664 This only checks the optional instance list against the existing names.
665
666 """
667 if self.wanted_names is None:
668 self.wanted_names = self.owned_locks(locking.LEVEL_INSTANCE)
669
670 self.wanted_instances = [
671 info
672 for (_, info) in self.cfg.GetMultiInstanceInfoByName(self.wanted_names)
673 ]
674
675 def _EnsureChildSizes(self, disk):
676 """Ensure children of the disk have the needed disk size.
677
678 This is valid mainly for DRBD8 and fixes an issue where the
679 children have smaller disk size.
680
681 @param disk: an L{ganeti.objects.Disk} object
682
683 """
684 if disk.dev_type == constants.DT_DRBD8:
685 assert disk.children, "Empty children for DRBD8?"
686 fchild = disk.children[0]
687 mismatch = fchild.size < disk.size
688 if mismatch:
689 self.LogInfo("Child disk has size %d, parent %d, fixing",
690 fchild.size, disk.size)
691 fchild.size = disk.size
692
693 # and we recurse on this child only, not on the metadev
694 return self._EnsureChildSizes(fchild) or mismatch
695 else:
696 return False
697
698 def Exec(self, feedback_fn):
699 """Verify the size of cluster disks.
700
701 """
702 # TODO: check child disks too
703 # TODO: check differences in size between primary/secondary nodes
704 per_node_disks = {}
705 for instance in self.wanted_instances:
706 pnode = instance.primary_node
707 if pnode not in per_node_disks:
708 per_node_disks[pnode] = []
709 for idx, disk in enumerate(self.cfg.GetInstanceDisks(instance.uuid)):
710 per_node_disks[pnode].append((instance, idx, disk))
711
712 assert not (frozenset(per_node_disks.keys()) -
713 frozenset(self.owned_locks(locking.LEVEL_NODE_RES))), \
714 "Not owning correct locks"
715 assert not self.owned_locks(locking.LEVEL_NODE)
716
717 es_flags = rpc.GetExclusiveStorageForNodes(self.cfg,
718 per_node_disks.keys())
719
720 changed = []
721 for node_uuid, dskl in per_node_disks.items():
722 if not dskl:
723 # no disks on the node
724 continue
725
726 newl = [([v[2].Copy()], v[0]) for v in dskl]
727 node_name = self.cfg.GetNodeName(node_uuid)
728 result = self.rpc.call_blockdev_getdimensions(node_uuid, newl)
729 if result.fail_msg:
730 self.LogWarning("Failure in blockdev_getdimensions call to node"
731 " %s, ignoring", node_name)
732 continue
733 if len(result.payload) != len(dskl):
734 logging.warning("Invalid result from node %s: len(dksl)=%d,"
735 " result.payload=%s", node_name, len(dskl),
736 result.payload)
737 self.LogWarning("Invalid result from node %s, ignoring node results",
738 node_name)
739 continue
740 for ((instance, idx, disk), dimensions) in zip(dskl, result.payload):
741 if dimensions is None:
742 self.LogWarning("Disk %d of instance %s did not return size"
743 " information, ignoring", idx, instance.name)
744 continue
745 if not isinstance(dimensions, (tuple, list)):
746 self.LogWarning("Disk %d of instance %s did not return valid"
747 " dimension information, ignoring", idx,
748 instance.name)
749 continue
750 (size, spindles) = dimensions
751 if not isinstance(size, (int, long)):
752 self.LogWarning("Disk %d of instance %s did not return valid"
753 " size information, ignoring", idx, instance.name)
754 continue
755 size = size >> 20
756 if size != disk.size:
757 self.LogInfo("Disk %d of instance %s has mismatched size,"
758 " correcting: recorded %d, actual %d", idx,
759 instance.name, disk.size, size)
760 disk.size = size
761 self.cfg.Update(disk, feedback_fn)
762 changed.append((instance.name, idx, "size", size))
763 if es_flags[node_uuid]:
764 if spindles is None:
765 self.LogWarning("Disk %d of instance %s did not return valid"
766 " spindles information, ignoring", idx,
767 instance.name)
768 elif disk.spindles is None or disk.spindles != spindles:
769 self.LogInfo("Disk %d of instance %s has mismatched spindles,"
770 " correcting: recorded %s, actual %s",
771 idx, instance.name, disk.spindles, spindles)
772 disk.spindles = spindles
773 self.cfg.Update(disk, feedback_fn)
774 changed.append((instance.name, idx, "spindles", disk.spindles))
775 if self._EnsureChildSizes(disk):
776 self.cfg.Update(disk, feedback_fn)
777 changed.append((instance.name, idx, "size", disk.size))
778 return changed
779
780
781 def _ValidateNetmask(cfg, netmask):
782 """Checks if a netmask is valid.
783
784 @type cfg: L{config.ConfigWriter}
785 @param cfg: cluster configuration
786 @type netmask: int
787 @param netmask: netmask to be verified
788 @raise errors.OpPrereqError: if the validation fails
789
790 """
791 ip_family = cfg.GetPrimaryIPFamily()
792 try:
793 ipcls = netutils.IPAddress.GetClassFromIpFamily(ip_family)
794 except errors.ProgrammerError:
795 raise errors.OpPrereqError("Invalid primary ip family: %s." %
796 ip_family, errors.ECODE_INVAL)
797 if not ipcls.ValidateNetmask(netmask):
798 raise errors.OpPrereqError("CIDR netmask (%s) not valid" %
799 (netmask), errors.ECODE_INVAL)
800
801
802 def CheckFileBasedStoragePathVsEnabledDiskTemplates(
803 logging_warn_fn, file_storage_dir, enabled_disk_templates,
804 file_disk_template):
805 """Checks whether the given file-based storage directory is acceptable.
806
807 Note: This function is public, because it is also used in bootstrap.py.
808
809 @type logging_warn_fn: function
810 @param logging_warn_fn: function which accepts a string and logs it
811 @type file_storage_dir: string
812 @param file_storage_dir: the directory to be used for file-based instances
813 @type enabled_disk_templates: list of string
814 @param enabled_disk_templates: the list of enabled disk templates
815 @type file_disk_template: string
816 @param file_disk_template: the file-based disk template for which the
817 path should be checked
818
819 """
820 assert (file_disk_template in utils.storage.GetDiskTemplatesOfStorageTypes(
821 constants.ST_FILE, constants.ST_SHARED_FILE, constants.ST_GLUSTER
822 ))
823
824 file_storage_enabled = file_disk_template in enabled_disk_templates
825 if file_storage_dir is not None:
826 if file_storage_dir == "":
827 if file_storage_enabled:
828 raise errors.OpPrereqError(
829 "Unsetting the '%s' storage directory while having '%s' storage"
830 " enabled is not permitted." %
831 (file_disk_template, file_disk_template),
832 errors.ECODE_INVAL)
833 else:
834 if not file_storage_enabled:
835 logging_warn_fn(
836 "Specified a %s storage directory, although %s storage is not"
837 " enabled." % (file_disk_template, file_disk_template))
838 else:
839 raise errors.ProgrammerError("Received %s storage dir with value"
840 " 'None'." % file_disk_template)
841
842
843 def CheckFileStoragePathVsEnabledDiskTemplates(
844 logging_warn_fn, file_storage_dir, enabled_disk_templates):
845 """Checks whether the given file storage directory is acceptable.
846
847 @see: C{CheckFileBasedStoragePathVsEnabledDiskTemplates}
848
849 """
850 CheckFileBasedStoragePathVsEnabledDiskTemplates(
851 logging_warn_fn, file_storage_dir, enabled_disk_templates,
852 constants.DT_FILE)
853
854
855 def CheckSharedFileStoragePathVsEnabledDiskTemplates(
856 logging_warn_fn, file_storage_dir, enabled_disk_templates):
857 """Checks whether the given shared file storage directory is acceptable.
858
859 @see: C{CheckFileBasedStoragePathVsEnabledDiskTemplates}
860
861 """
862 CheckFileBasedStoragePathVsEnabledDiskTemplates(
863 logging_warn_fn, file_storage_dir, enabled_disk_templates,
864 constants.DT_SHARED_FILE)
865
866
867 def CheckGlusterStoragePathVsEnabledDiskTemplates(
868 logging_warn_fn, file_storage_dir, enabled_disk_templates):
869 """Checks whether the given gluster storage directory is acceptable.
870
871 @see: C{CheckFileBasedStoragePathVsEnabledDiskTemplates}
872
873 """
874 CheckFileBasedStoragePathVsEnabledDiskTemplates(
875 logging_warn_fn, file_storage_dir, enabled_disk_templates,
876 constants.DT_GLUSTER)
877
878
879 def CheckCompressionTools(tools):
880 """Check whether the provided compression tools look like executables.
881
882 @type tools: list of string
883 @param tools: The tools provided as opcode input
884
885 """
886 regex = re.compile('^[-_a-zA-Z0-9]+$')
887 illegal_tools = [t for t in tools if not regex.match(t)]
888
889 if illegal_tools:
890 raise errors.OpPrereqError(
891 "The tools '%s' contain illegal characters: only alphanumeric values,"
892 " dashes, and underscores are allowed" % ", ".join(illegal_tools),
893 errors.ECODE_INVAL
894 )
895
896 if constants.IEC_GZIP not in tools:
897 raise errors.OpPrereqError("For compatibility reasons, the %s utility must"
898 " be present among the compression tools" %
899 constants.IEC_GZIP, errors.ECODE_INVAL)
900
901 if constants.IEC_NONE in tools:
902 raise errors.OpPrereqError("%s is a reserved value used for no compression,"
903 " and cannot be used as the name of a tool" %
904 constants.IEC_NONE, errors.ECODE_INVAL)
905
906
907 class LUClusterSetParams(LogicalUnit):
908 """Change the parameters of the cluster.
909
910 """
911 HPATH = "cluster-modify"
912 HTYPE = constants.HTYPE_CLUSTER
913 REQ_BGL = False
914
915 def CheckArguments(self):
916 """Check parameters
917
918 """
919 if self.op.uid_pool:
920 uidpool.CheckUidPool(self.op.uid_pool)
921
922 if self.op.add_uids:
923 uidpool.CheckUidPool(self.op.add_uids)
924
925 if self.op.remove_uids:
926 uidpool.CheckUidPool(self.op.remove_uids)
927
928 if self.op.mac_prefix:
929 self.op.mac_prefix = \
930 utils.NormalizeAndValidateThreeOctetMacPrefix(self.op.mac_prefix)
931
932 if self.op.master_netmask is not None:
933 _ValidateNetmask(self.cfg, self.op.master_netmask)
934
935 if self.op.diskparams:
936 for dt_params in self.op.diskparams.values():
937 utils.ForceDictType(dt_params, constants.DISK_DT_TYPES)
938 try:
939 utils.VerifyDictOptions(self.op.diskparams, constants.DISK_DT_DEFAULTS)
940 CheckDiskAccessModeValidity(self.op.diskparams)
941 except errors.OpPrereqError, err:
942 raise errors.OpPrereqError("While verify diskparams options: %s" % err,
943 errors.ECODE_INVAL)
944
945 if self.op.install_image is not None:
946 CheckImageValidity(self.op.install_image,
947 "Install image must be an absolute path or a URL")
948
949 def ExpandNames(self):
950 # FIXME: in the future maybe other cluster params won't require checking on
951 # all nodes to be modified.
952 # FIXME: This opcode changes cluster-wide settings. Is acquiring all
953 # resource locks the right thing, shouldn't it be the BGL instead?
954 self.needed_locks = {
955 locking.LEVEL_NODE: locking.ALL_SET,
956 locking.LEVEL_INSTANCE: locking.ALL_SET,
957 locking.LEVEL_NODEGROUP: locking.ALL_SET,
958 }
959 self.share_locks = ShareAll()
960
961 def BuildHooksEnv(self):
962 """Build hooks env.
963
964 """
965 return {
966 "OP_TARGET": self.cfg.GetClusterName(),
967 "NEW_VG_NAME": self.op.vg_name,
968 }
969
970 def BuildHooksNodes(self):
971 """Build hooks nodes.
972
973 """
974 mn = self.cfg.GetMasterNode()
975 return ([mn], [mn])
976
977 def _CheckVgName(self, node_uuids, enabled_disk_templates,
978 new_enabled_disk_templates):
979 """Check the consistency of the vg name on all nodes and in case it gets
980 unset whether there are instances still using it.
981
982 """
983 lvm_is_enabled = utils.IsLvmEnabled(enabled_disk_templates)
984 lvm_gets_enabled = utils.LvmGetsEnabled(enabled_disk_templates,
985 new_enabled_disk_templates)
986 current_vg_name = self.cfg.GetVGName()
987
988 if self.op.vg_name == '':
989 if lvm_is_enabled:
990 raise errors.OpPrereqError("Cannot unset volume group if lvm-based"
991 " disk templates are or get enabled.",
992 errors.ECODE_INVAL)
993
994 if self.op.vg_name is None:
995 if current_vg_name is None and lvm_is_enabled:
996 raise errors.OpPrereqError("Please specify a volume group when"
997 " enabling lvm-based disk-templates.",
998 errors.ECODE_INVAL)
999
1000 if self.op.vg_name is not None and not self.op.vg_name:
1001 if self.cfg.DisksOfType(constants.DT_PLAIN):
1002 raise errors.OpPrereqError("Cannot disable lvm storage while lvm-based"
1003 " instances exist", errors.ECODE_INVAL)
1004
1005 if (self.op.vg_name is not None and lvm_is_enabled) or \
1006 (self.cfg.GetVGName() is not None and lvm_gets_enabled):
1007 self._CheckVgNameOnNodes(node_uuids)
1008
1009 def _CheckVgNameOnNodes(self, node_uuids):
1010 """Check the status of the volume group on each node.
1011
1012 """
1013 vglist = self.rpc.call_vg_list(node_uuids)
1014 for node_uuid in node_uuids:
1015 msg = vglist[node_uuid].fail_msg
1016 if msg:
1017 # ignoring down node
1018 self.LogWarning("Error while gathering data on node %s"
1019 " (ignoring node): %s",
1020 self.cfg.GetNodeName(node_uuid), msg)
1021 continue
1022 vgstatus = utils.CheckVolumeGroupSize(vglist[node_uuid].payload,
1023 self.op.vg_name,
1024 constants.MIN_VG_SIZE)
1025 if vgstatus:
1026 raise errors.OpPrereqError("Error on node '%s': %s" %
1027 (self.cfg.GetNodeName(node_uuid), vgstatus),
1028 errors.ECODE_ENVIRON)
1029
1030 @staticmethod
1031 def _GetDiskTemplateSetsInner(op_enabled_disk_templates,
1032 old_enabled_disk_templates):
1033 """Computes three sets of disk templates.
1034
1035 @see: C{_GetDiskTemplateSets} for more details.
1036
1037 """
1038 enabled_disk_templates = None
1039 new_enabled_disk_templates = []
1040 disabled_disk_templates = []
1041 if op_enabled_disk_templates:
1042 enabled_disk_templates = op_enabled_disk_templates
1043 new_enabled_disk_templates = \
1044 list(set(enabled_disk_templates)
1045 - set(old_enabled_disk_templates))
1046 disabled_disk_templates = \
1047 list(set(old_enabled_disk_templates)
1048 - set(enabled_disk_templates))
1049 else:
1050 enabled_disk_templates = old_enabled_disk_templates
1051 return (enabled_disk_templates, new_enabled_disk_templates,
1052 disabled_disk_templates)
1053
1054 def _GetDiskTemplateSets(self, cluster):
1055 """Computes three sets of disk templates.
1056
1057 The three sets are:
1058 - disk templates that will be enabled after this operation (no matter if
1059 they were enabled before or not)
1060 - disk templates that get enabled by this operation (thus haven't been
1061 enabled before.)
1062 - disk templates that get disabled by this operation
1063
1064 """
1065 return self._GetDiskTemplateSetsInner(self.op.enabled_disk_templates,
1066 cluster.enabled_disk_templates)
1067
1068 def _CheckIpolicy(self, cluster, enabled_disk_templates):
1069 """Checks the ipolicy.
1070
1071 @type cluster: C{objects.Cluster}
1072 @param cluster: the cluster's configuration
1073 @type enabled_disk_templates: list of string
1074 @param enabled_disk_templates: list of (possibly newly) enabled disk
1075 templates
1076
1077 """
1078 # FIXME: write unit tests for this
1079 if self.op.ipolicy:
1080 self.new_ipolicy = GetUpdatedIPolicy(cluster.ipolicy, self.op.ipolicy,
1081 group_policy=False)
1082
1083 CheckIpolicyVsDiskTemplates(self.new_ipolicy,
1084 enabled_disk_templates)
1085
1086 all_instances = self.cfg.GetAllInstancesInfo().values()
1087 violations = set()
1088 for group in self.cfg.GetAllNodeGroupsInfo().values():
1089 instances = frozenset(
1090 [inst for inst in all_instances
1091 if compat.any(nuuid in group.members
1092 for nuuid in self.cfg.GetInstanceNodes(inst.uuid))])
1093 new_ipolicy = objects.FillIPolicy(self.new_ipolicy, group.ipolicy)
1094 ipol = masterd.instance.CalculateGroupIPolicy(cluster, group)
1095 new = ComputeNewInstanceViolations(ipol, new_ipolicy, instances,
1096 self.cfg)
1097 if new:
1098 violations.update(new)
1099
1100 if violations:
1101 self.LogWarning("After the ipolicy change the following instances"
1102 " violate them: %s",
1103 utils.CommaJoin(utils.NiceSort(violations)))
1104 else:
1105 CheckIpolicyVsDiskTemplates(cluster.ipolicy,
1106 enabled_disk_templates)
1107
1108 def _CheckDrbdHelperOnNodes(self, drbd_helper, node_uuids):
1109 """Checks whether the set DRBD helper actually exists on the nodes.
1110
1111 @type drbd_helper: string
1112 @param drbd_helper: path of the drbd usermode helper binary
1113 @type node_uuids: list of strings
1114 @param node_uuids: list of node UUIDs to check for the helper
1115
1116 """
1117 # checks given drbd helper on all nodes
1118 helpers = self.rpc.call_drbd_helper(node_uuids)
1119 for (_, ninfo) in self.cfg.GetMultiNodeInfo(node_uuids):
1120 if ninfo.offline:
1121 self.LogInfo("Not checking drbd helper on offline node %s",
1122 ninfo.name)
1123 continue
1124 msg = helpers[ninfo.uuid].fail_msg
1125 if msg:
1126 raise errors.OpPrereqError("Error checking drbd helper on node"
1127 " '%s': %s" % (ninfo.name, msg),
1128 errors.ECODE_ENVIRON)
1129 node_helper = helpers[ninfo.uuid].payload
1130 if node_helper != drbd_helper:
1131 raise errors.OpPrereqError("Error on node '%s': drbd helper is %s" %
1132 (ninfo.name, node_helper),
1133 errors.ECODE_ENVIRON)
1134
1135 def _CheckDrbdHelper(self, node_uuids, drbd_enabled, drbd_gets_enabled):
1136 """Check the DRBD usermode helper.
1137
1138 @type node_uuids: list of strings
1139 @param node_uuids: a list of nodes' UUIDs
1140 @type drbd_enabled: boolean
1141 @param drbd_enabled: whether DRBD will be enabled after this operation
1142 (no matter if it was disabled before or not)
1143 @type drbd_gets_enabled: boolen
1144 @param drbd_gets_enabled: true if DRBD was disabled before this
1145 operation, but will be enabled afterwards
1146
1147 """
1148 if self.op.drbd_helper == '':
1149 if drbd_enabled:
1150 raise errors.OpPrereqError("Cannot disable drbd helper while"
1151 " DRBD is enabled.", errors.ECODE_STATE)
1152 if self.cfg.DisksOfType(constants.DT_DRBD8):
1153 raise errors.OpPrereqError("Cannot disable drbd helper while"
1154 " drbd-based instances exist",
1155 errors.ECODE_INVAL)
1156
1157 else:
1158 if self.op.drbd_helper is not None and drbd_enabled:
1159 self._CheckDrbdHelperOnNodes(self.op.drbd_helper, node_uuids)
1160 else:
1161 if drbd_gets_enabled:
1162 current_drbd_helper = self.cfg.GetClusterInfo().drbd_usermode_helper
1163 if current_drbd_helper is not None:
1164 self._CheckDrbdHelperOnNodes(current_drbd_helper, node_uuids)
1165 else:
1166 raise errors.OpPrereqError("Cannot enable DRBD without a"
1167 " DRBD usermode helper set.",
1168 errors.ECODE_STATE)
1169
1170 def _CheckInstancesOfDisabledDiskTemplates(
1171 self, disabled_disk_templates):
1172 """Check whether we try to disable a disk template that is in use.
1173
1174 @type disabled_disk_templates: list of string
1175 @param disabled_disk_templates: list of disk templates that are going to
1176 be disabled by this operation
1177
1178 """
1179 for disk_template in disabled_disk_templates:
1180 disks_with_type = self.cfg.DisksOfType(disk_template)
1181 if disks_with_type:
1182 disk_desc = []
1183 for disk in disks_with_type:
1184 instance_uuid = self.cfg.GetInstanceForDisk(disk.uuid)
1185 instance = self.cfg.GetInstanceInfo(instance_uuid)
1186 if instance:
1187 instance_desc = "on " + instance.name
1188 else:
1189 instance_desc = "detached"
1190 disk_desc.append("%s (%s)" % (disk, instance_desc))
1191 raise errors.OpPrereqError(
1192 "Cannot disable disk template '%s', because there is at least one"
1193 " disk using it:\n * %s" % (disk_template, "\n * ".join(disk_desc)),
1194 errors.ECODE_STATE)
1195 if constants.DT_DISKLESS in disabled_disk_templates:
1196 instances = self.cfg.GetAllInstancesInfo()
1197 for inst in instances.values():
1198 if not inst.disks:
1199 raise errors.OpPrereqError(
1200 "Cannot disable disk template 'diskless', because there is at"
1201 " least one instance using it:\n * %s" % inst.name,
1202 errors.ECODE_STATE)
1203
1204 @staticmethod
1205 def _CheckInstanceCommunicationNetwork(network, warning_fn):
1206 """Check whether an existing network is configured for instance
1207 communication.
1208
1209 Checks whether an existing network is configured with the
1210 parameters that are advisable for instance communication, and
1211 otherwise issue security warnings.
1212
1213 @type network: L{ganeti.objects.Network}
1214 @param network: L{ganeti.objects.Network} object whose
1215 configuration is being checked
1216 @type warning_fn: function
1217 @param warning_fn: function used to print warnings
1218 @rtype: None
1219 @return: None
1220
1221 """
1222 def _MaybeWarn(err, val, default):
1223 if val != default:
1224 warning_fn("Supplied instance communication network '%s' %s '%s',"
1225 " this might pose a security risk (default is '%s').",
1226 network.name, err, val, default)
1227
1228 if network.network is None:
1229 raise errors.OpPrereqError("Supplied instance communication network '%s'"
1230 " must have an IPv4 network address.",
1231 network.name)
1232
1233 _MaybeWarn("has an IPv4 gateway", network.gateway, None)
1234 _MaybeWarn("has a non-standard IPv4 network address", network.network,
1235 constants.INSTANCE_COMMUNICATION_NETWORK4)
1236 _MaybeWarn("has an IPv6 gateway", network.gateway6, None)
1237 _MaybeWarn("has a non-standard IPv6 network address", network.network6,
1238 constants.INSTANCE_COMMUNICATION_NETWORK6)
1239 _MaybeWarn("has a non-standard MAC prefix", network.mac_prefix,
1240 constants.INSTANCE_COMMUNICATION_MAC_PREFIX)
1241
1242 def CheckPrereq(self):
1243 """Check prerequisites.
1244
1245 This checks whether the given params don't conflict and
1246 if the given volume group is valid.
1247
1248 """
1249 node_uuids = self.owned_locks(locking.LEVEL_NODE)
1250 self.cluster = cluster = self.cfg.GetClusterInfo()
1251
1252 vm_capable_node_uuids = [node.uuid
1253 for node in self.cfg.GetAllNodesInfo().values()
1254 if node.uuid in node_uuids and node.vm_capable]
1255
1256 (enabled_disk_templates, new_enabled_disk_templates,
1257 disabled_disk_templates) = self._GetDiskTemplateSets(cluster)
1258 self._CheckInstancesOfDisabledDiskTemplates(disabled_disk_templates)
1259
1260 self._CheckVgName(vm_capable_node_uuids, enabled_disk_templates,
1261 new_enabled_disk_templates)
1262
1263 if self.op.file_storage_dir is not None:
1264 CheckFileStoragePathVsEnabledDiskTemplates(
1265 self.LogWarning, self.op.file_storage_dir, enabled_disk_templates)
1266
1267 if self.op.shared_file_storage_dir is not None:
1268 CheckSharedFileStoragePathVsEnabledDiskTemplates(
1269 self.LogWarning, self.op.shared_file_storage_dir,
1270 enabled_disk_templates)
1271
1272 drbd_enabled = constants.DT_DRBD8 in enabled_disk_templates
1273 drbd_gets_enabled = constants.DT_DRBD8 in new_enabled_disk_templates
1274 self._CheckDrbdHelper(vm_capable_node_uuids,
1275 drbd_enabled, drbd_gets_enabled)
1276
1277 # validate params changes
1278 if self.op.beparams:
1279 objects.UpgradeBeParams(self.op.beparams)
1280 utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
1281 self.new_beparams = cluster.SimpleFillBE(self.op.beparams)
1282
1283 if self.op.ndparams:
1284 utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
1285 self.new_ndparams = cluster.SimpleFillND(self.op.ndparams)
1286
1287 # TODO: we need a more general way to handle resetting
1288 # cluster-level parameters to default values
1289 if self.new_ndparams["oob_program"] == "":
1290 self.new_ndparams["oob_program"] = \
1291 constants.NDC_DEFAULTS[constants.ND_OOB_PROGRAM]
1292
1293 if self.op.hv_state:
1294 new_hv_state = MergeAndVerifyHvState(self.op.hv_state,
1295 self.cluster.hv_state_static)
1296 self.new_hv_state = dict((hv, cluster.SimpleFillHvState(values))
1297 for hv, values in new_hv_state.items())
1298
1299 if self.op.disk_state:
1300 new_disk_state = MergeAndVerifyDiskState(self.op.disk_state,
1301 self.cluster.disk_state_static)
1302 self.new_disk_state = \
1303 dict((storage, dict((name, cluster.SimpleFillDiskState(values))
1304 for name, values in svalues.items()))
1305 for storage, svalues in new_disk_state.items())
1306
1307 self._CheckIpolicy(cluster, enabled_disk_templates)
1308
1309 if self.op.nicparams:
1310 utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
1311 self.new_nicparams = cluster.SimpleFillNIC(self.op.nicparams)
1312 objects.NIC.CheckParameterSyntax(self.new_nicparams)
1313 nic_errors = []
1314
1315 # check all instances for consistency
1316 for instance in self.cfg.GetAllInstancesInfo().values():
1317 for nic_idx, nic in enumerate(instance.nics):
1318 params_copy = copy.deepcopy(nic.nicparams)
1319 params_filled = objects.FillDict(self.new_nicparams, params_copy)
1320
1321 # check parameter syntax
1322 try:
1323 objects.NIC.CheckParameterSyntax(params_filled)
1324 except errors.ConfigurationError, err:
1325 nic_errors.append("Instance %s, nic/%d: %s" %
1326 (instance.name, nic_idx, err))
1327
1328 # if we're moving instances to routed, check that they have an ip
1329 target_mode = params_filled[constants.NIC_MODE]
1330 if target_mode == constants.NIC_MODE_ROUTED and not nic.ip:
1331 nic_errors.append("Instance %s, nic/%d: routed NIC with no ip"
1332 " address" % (instance.name, nic_idx))
1333 if nic_errors:
1334 raise errors.OpPrereqError("Cannot apply the change, errors:\n%s" %
1335 "\n".join(nic_errors), errors.ECODE_INVAL)
1336
1337 # hypervisor list/parameters
1338 self.new_hvparams = new_hvp = objects.FillDict(cluster.hvparams, {})
1339 if self.op.hvparams:
1340 for hv_name, hv_dict in self.op.hvparams.items():
1341 if hv_name not in self.new_hvparams:
1342 self.new_hvparams[hv_name] = hv_dict
1343 else:
1344 self.new_hvparams[hv_name].update(hv_dict)
1345
1346 # disk template parameters
1347 self.new_diskparams = objects.FillDict(cluster.diskparams, {})
1348 if self.op.diskparams:
1349 for dt_name, dt_params in self.op.diskparams.items():
1350 if dt_name not in self.new_diskparams:
1351 self.new_diskparams[dt_name] = dt_params
1352 else:
1353 self.new_diskparams[dt_name].update(dt_params)
1354 CheckDiskAccessModeConsistency(self.op.diskparams, self.cfg)
1355
1356 # os hypervisor parameters
1357 self.new_os_hvp = objects.FillDict(cluster.os_hvp, {})
1358 if self.op.os_hvp:
1359 for os_name, hvs in self.op.os_hvp.items():
1360 if os_name not in self.new_os_hvp:
1361 self.new_os_hvp[os_name] = hvs
1362 else:
1363 for hv_name, hv_dict in hvs.items():
1364 if hv_dict is None:
1365 # Delete if it exists
1366 self.new_os_hvp[os_name].pop(hv_name, None)
1367 elif hv_name not in self.new_os_hvp[os_name]:
1368 self.new_os_hvp[os_name][hv_name] = hv_dict
1369 else:
1370 self.new_os_hvp[os_name][hv_name].update(hv_dict)
1371
1372 # os parameters
1373 self._BuildOSParams(cluster)
1374
1375 # changes to the hypervisor list
1376 if self.op.enabled_hypervisors is not None:
1377 for hv in self.op.enabled_hypervisors:
1378 # if the hypervisor doesn't already exist in the cluster
1379 # hvparams, we initialize it to empty, and then (in both
1380 # cases) we make sure to fill the defaults, as we might not
1381 # have a complete defaults list if the hypervisor wasn't
1382 # enabled before
1383 if hv not in new_hvp:
1384 new_hvp[hv] = {}
1385 new_hvp[hv] = objects.FillDict(constants.HVC_DEFAULTS[hv], new_hvp[hv])
1386 utils.ForceDictType(new_hvp[hv], constants.HVS_PARAMETER_TYPES)
1387
1388 if self.op.hvparams or self.op.enabled_hypervisors is not None:
1389 # either the enabled list has changed, or the parameters have, validate
1390 for hv_name, hv_params in self.new_hvparams.items():
1391 if ((self.op.hvparams and hv_name in self.op.hvparams) or
1392 (self.op.enabled_hypervisors and
1393 hv_name in self.op.enabled_hypervisors)):
1394 # either this is a new hypervisor, or its parameters have changed
1395 hv_class = hypervisor.GetHypervisorClass(hv_name)
1396 utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
1397 hv_class.CheckParameterSyntax(hv_params)
1398 CheckHVParams(self, node_uuids, hv_name, hv_params)
1399
1400 if self.op.os_hvp:
1401 # no need to check any newly-enabled hypervisors, since the
1402 # defaults have already been checked in the above code-block
1403 for os_name, os_hvp in self.new_os_hvp.items():
1404 for hv_name, hv_params in os_hvp.items():
1405 utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
1406 # we need to fill in the new os_hvp on top of the actual hv_p
1407 cluster_defaults = self.new_hvparams.get(hv_name, {})
1408 new_osp = objects.FillDict(cluster_defaults, hv_params)
1409 hv_class = hypervisor.GetHypervisorClass(hv_name)
1410 hv_class.CheckParameterSyntax(new_osp)
1411 CheckHVParams(self, node_uuids, hv_name, new_osp)
1412
1413 if self.op.default_iallocator:
1414 alloc_script = utils.FindFile(self.op.default_iallocator,
1415 constants.IALLOCATOR_SEARCH_PATH,
1416 os.path.isfile)
1417 if alloc_script is None:
1418 raise errors.OpPrereqError("Invalid default iallocator script '%s'"
1419 " specified" % self.op.default_iallocator,
1420 errors.ECODE_INVAL)
1421
1422 if self.op.instance_communication_network:
1423 network_name = self.op.instance_communication_network
1424
1425 try:
1426 network_uuid = self.cfg.LookupNetwork(network_name)
1427 except errors.OpPrereqError:
1428 network_uuid = None
1429
1430 if network_uuid is not None:
1431 network = self.cfg.GetNetwork(network_uuid)
1432 self._CheckInstanceCommunicationNetwork(network, self.LogWarning)
1433
1434 if self.op.compression_tools:
1435 CheckCompressionTools(self.op.compression_tools)
1436
1437 def _BuildOSParams(self, cluster):
1438 "Calculate the new OS parameters for this operation."
1439
1440 def _GetNewParams(source, new_params):
1441 "Wrapper around GetUpdatedParams."
1442 if new_params is None:
1443 return source
1444 result = objects.FillDict(source, {}) # deep copy of source
1445 for os_name in new_params:
1446 result[os_name] = GetUpdatedParams(result.get(os_name, {}),
1447 new_params[os_name],
1448 use_none=True)
1449 if not result[os_name]:
1450 del result[os_name] # we removed all parameters
1451 return result
1452
1453 self.new_osp = _GetNewParams(cluster.osparams,
1454 self.op.osparams)
1455 self.new_osp_private = _GetNewParams(cluster.osparams_private_cluster,
1456 self.op.osparams_private_cluster)
1457
1458 # Remove os validity check
1459 changed_oses = (set(self.new_osp.keys()) | set(self.new_osp_private.keys()))
1460 for os_name in changed_oses:
1461 os_params = cluster.SimpleFillOS(
1462 os_name,
1463 self.new_osp.get(os_name, {}),
1464 os_params_private=self.new_osp_private.get(os_name, {})
1465 )
1466 # check the parameter validity (remote check)
1467 CheckOSParams(self, False, [self.cfg.GetMasterNode()],
1468 os_name, os_params, False)
1469
1470 def _SetVgName(self, feedback_fn):
1471 """Determines and sets the new volume group name.
1472
1473 """
1474 if self.op.vg_name is not None:
1475 new_volume = self.op.vg_name
1476 if not new_volume:
1477 new_volume = None
1478 if new_volume != self.cfg.GetVGName():
1479 self.cfg.SetVGName(new_volume)
1480 else:
1481 feedback_fn("Cluster LVM configuration already in desired"
1482 " state, not changing")
1483
1484 def _SetDiagnoseDataCollectorFilename(self, feedback_fn):
1485 """Determines and sets the filename of the script
1486 diagnose data collector should run.
1487
1488 """
1489 if self.op.diagnose_data_collector_filename is not None:
1490 fn = self.op.diagnose_data_collector_filename
1491 if fn != self.cfg.GetDiagnoseDataCollectorFilename():
1492 self.cfg.SetDiagnoseDataCollectorFilename(fn)
1493 else:
1494 feedback_fn("Diagnose data collector filename"
1495 " configuration already in desired"
1496 " state, not changing")
1497
1498 def _SetFileStorageDir(self, feedback_fn):
1499 """Set the file storage directory.
1500
1501 """
1502 if self.op.file_storage_dir is not None:
1503 if self.cluster.file_storage_dir == self.op.file_storage_dir:
1504 feedback_fn("Global file storage dir already set to value '%s'"
1505 % self.cluster.file_storage_dir)
1506 else:
1507 self.cluster.file_storage_dir = self.op.file_storage_dir
1508
1509 def _SetSharedFileStorageDir(self, feedback_fn):
1510 """Set the shared file storage directory.
1511
1512 """
1513 if self.op.shared_file_storage_dir is not None:
1514 if self.cluster.shared_file_storage_dir == \
1515 self.op.shared_file_storage_dir:
1516 feedback_fn("Global shared file storage dir already set to value '%s'"
1517 % self.cluster.shared_file_storage_dir)
1518 else:
1519 self.cluster.shared_file_storage_dir = self.op.shared_file_storage_dir
1520
1521 def _SetDrbdHelper(self, feedback_fn):
1522 """Set the DRBD usermode helper.
1523
1524 """
1525 if self.op.drbd_helper is not None:
1526 if not constants.DT_DRBD8 in self.cluster.enabled_disk_templates:
1527 feedback_fn("Note that you specified a drbd user helper, but did not"
1528 " enable the drbd disk template.")
1529 new_helper = self.op.drbd_helper
1530 if not new_helper:
1531 new_helper = None
1532 if new_helper != self.cfg.GetDRBDHelper():
1533 self.cfg.SetDRBDHelper(new_helper)
1534 else:
1535 feedback_fn("Cluster DRBD helper already in desired state,"
1536 " not changing")
1537
1538 @staticmethod
1539 def _EnsureInstanceCommunicationNetwork(cfg, network_name):
1540 """Ensure that the instance communication network exists and is
1541 connected to all groups.
1542
1543 The instance communication network given by L{network_name} it is
1544 created, if necessary, via the opcode 'OpNetworkAdd'. Also, the
1545 instance communication network is connected to all existing node
1546 groups, if necessary, via the opcode 'OpNetworkConnect'.
1547
1548 @type cfg: L{config.ConfigWriter}
1549 @param cfg: cluster configuration
1550
1551 @type network_name: string
1552 @param network_name: instance communication network name
1553
1554 @rtype: L{ganeti.cmdlib.ResultWithJobs} or L{None}
1555 @return: L{ganeti.cmdlib.ResultWithJobs} if the instance
1556 communication needs to be created or it needs to be
1557 connected to a group, otherwise L{None}
1558
1559 """
1560 jobs = []
1561
1562 try:
1563 network_uuid = cfg.LookupNetwork(network_name)
1564 network_exists = True
1565 except errors.OpPrereqError:
1566 network_exists = False
1567
1568 if not network_exists:
1569 jobs.append(AddInstanceCommunicationNetworkOp(network_name))
1570
1571 for group_uuid in cfg.GetNodeGroupList():
1572 group = cfg.GetNodeGroup(group_uuid)
1573
1574 if network_exists:
1575 network_connected = network_uuid in group.networks
1576 else:
1577 # The network was created asynchronously by the previous
1578 # opcode and, therefore, we don't have access to its
1579 # network_uuid. As a result, we assume that the network is
1580 # not connected to any group yet.
1581 network_connected = False
1582
1583 if not network_connected:
1584 op = ConnectInstanceCommunicationNetworkOp(group_uuid, network_name)
1585 jobs.append(op)
1586
1587 if jobs:
1588 return ResultWithJobs([jobs])
1589 else:
1590 return None
1591
1592 @staticmethod
1593 def _ModifyInstanceCommunicationNetwork(cfg, network_name, feedback_fn):
1594 """Update the instance communication network stored in the cluster
1595 configuration.
1596
1597 Compares the user-supplied instance communication network against
1598 the one stored in the Ganeti cluster configuration. If there is a
1599 change, the instance communication network may be possibly created
1600 and connected to all groups (see
1601 L{LUClusterSetParams._EnsureInstanceCommunicationNetwork}).
1602
1603 @type cfg: L{config.ConfigWriter}
1604 @param cfg: cluster configuration
1605
1606 @type network_name: string
1607 @param network_name: instance communication network name
1608
1609 @type feedback_fn: function
1610 @param feedback_fn: see L{ganeti.cmdlist.base.LogicalUnit}
1611
1612 @rtype: L{LUClusterSetParams._EnsureInstanceCommunicationNetwork} or L{None}
1613 @return: see L{LUClusterSetParams._EnsureInstanceCommunicationNetwork}
1614
1615 """
1616 config_network_name = cfg.GetInstanceCommunicationNetwork()
1617
1618 if network_name == config_network_name:
1619 feedback_fn("Instance communication network already is '%s', nothing to"
1620 " do." % network_name)
1621 else:
1622 try:
1623 cfg.LookupNetwork(config_network_name)
1624 feedback_fn("Previous instance communication network '%s'"
1625 " should be removed manually." % config_network_name)
1626 except errors.OpPrereqError:
1627 pass
1628
1629 if network_name:
1630 feedback_fn("Changing instance communication network to '%s', only new"
1631 " instances will be affected."
1632 % network_name)
1633 else:
1634 feedback_fn("Disabling instance communication network, only new"
1635 " instances will be affected.")
1636
1637 cfg.SetInstanceCommunicationNetwork(network_name)
1638
1639 if network_name:
1640 return LUClusterSetParams._EnsureInstanceCommunicationNetwork(
1641 cfg,
1642 network_name)
1643 else:
1644 return None
1645
1646 def Exec(self, feedback_fn):
1647 """Change the parameters of the cluster.
1648
1649 """
1650 # re-read the fresh configuration
1651 self.cluster = self.cfg.GetClusterInfo()
1652 if self.op.enabled_disk_templates:
1653 self.cluster.enabled_disk_templates = \
1654 list(self.op.enabled_disk_templates)
1655 # save the changes
1656 self.cfg.Update(self.cluster, feedback_fn)
1657
1658 self._SetVgName(feedback_fn)
1659
1660 self.cluster = self.cfg.GetClusterInfo()
1661 self._SetFileStorageDir(feedback_fn)
1662 self._SetSharedFileStorageDir(feedback_fn)
1663 self.cfg.Update(self.cluster, feedback_fn)
1664 self._SetDrbdHelper(feedback_fn)
1665 self._SetDiagnoseDataCollectorFilename(feedback_fn)
1666
1667 # re-read the fresh configuration again
1668 self.cluster = self.cfg.GetClusterInfo()
1669
1670 ensure_kvmd = False
1671 stop_kvmd_silently = not (
1672 constants.HT_KVM in self.cluster.enabled_hypervisors or
1673 (self.op.enabled_hypervisors is not None and
1674 constants.HT_KVM in self.op.enabled_hypervisors))
1675
1676 active = constants.DATA_COLLECTOR_STATE_ACTIVE
1677 if self.op.enabled_data_collectors is not None:
1678 for name, val in self.op.enabled_data_collectors.items():
1679 self.cluster.data_collectors[name][active] = val
1680
1681 if self.op.data_collector_interval:
1682 internal = constants.DATA_COLLECTOR_PARAMETER_INTERVAL
1683 for name, val in self.op.data_collector_interval.items():
1684 self.cluster.data_collectors[name][internal] = int(val)
1685
1686 if self.op.hvparams:
1687 self.cluster.hvparams = self.new_hvparams
1688 if self.op.os_hvp:
1689 self.cluster.os_hvp = self.new_os_hvp
1690 if self.op.enabled_hypervisors is not None:
1691 self.cluster.hvparams = self.new_hvparams
1692 self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
1693 ensure_kvmd = True
1694 if self.op.beparams:
1695 self.cluster.beparams[constants.PP_DEFAULT] = self.new_beparams
1696 if self.op.nicparams:
1697 self.cluster.nicparams[constants.PP_DEFAULT] = self.new_nicparams
1698 if self.op.ipolicy:
1699 self.cluster.ipolicy = self.new_ipolicy
1700 if self.op.osparams:
1701 self.cluster.osparams = self.new_osp
1702 if self.op.osparams_private_cluster:
1703 self.cluster.osparams_private_cluster = self.new_osp_private
1704 if self.op.ndparams:
1705 self.cluster.ndparams = self.new_ndparams
1706 if self.op.diskparams:
1707 self.cluster.diskparams = self.new_diskparams
1708 if self.op.hv_state:
1709 self.cluster.hv_state_static = self.new_hv_state
1710 if self.op.disk_state:
1711 self.cluster.disk_state_static = self.new_disk_state
1712
1713 if self.op.candidate_pool_size is not None:
1714 self.cluster.candidate_pool_size = self.op.candidate_pool_size
1715 # we need to update the pool size here, otherwise the save will fail
1716 master_node = self.cfg.GetMasterNode()
1717 potential_master_candidates = self.cfg.GetPotentialMasterCandidates()
1718 modify_ssh_setup = self.cfg.GetClusterInfo().modify_ssh_setup
1719 AdjustCandidatePool(
1720 self, [], master_node, potential_master_candidates, feedback_fn,
1721 modify_ssh_setup)
1722
1723 if self.op.max_running_jobs is not None:
1724 self.cluster.max_running_jobs = self.op.max_running_jobs
1725
1726 if self.op.max_tracked_jobs is not None:
1727 self.cluster.max_tracked_jobs = self.op.max_tracked_jobs
1728
1729 if self.op.maintain_node_health is not None:
1730 self.cluster.maintain_node_health = self.op.maintain_node_health
1731
1732 if self.op.modify_etc_hosts is not None:
1733 self.cluster.modify_etc_hosts = self.op.modify_etc_hosts
1734
1735 if self.op.modify_ssh_setup is not None:
1736 if (self.op.modify_ssh_setup and
1737 not self.cfg.GetClusterInfo().modify_ssh_setup):
1738 feedback_fn(
1739 "Enabling modify_ssh_setup for cluster. You may need to run"
1740 " 'gnt-cluster renew-crypto --new-ssh-keys --no-ssh-key-check'"
1741 " to redistribute the ssh public key settings for each node.")
1742 self.cluster.modify_ssh_setup = self.op.modify_ssh_setup
1743
1744 if self.op.prealloc_wipe_disks is not None:
1745 self.cluster.prealloc_wipe_disks = self.op.prealloc_wipe_disks
1746
1747 if self.op.add_uids is not None:
1748 uidpool.AddToUidPool(self.cluster.uid_pool, self.op.add_uids)
1749
1750 if self.op.remove_uids is not None:
1751 uidpool.RemoveFromUidPool(self.cluster.uid_pool, self.op.remove_uids)
1752
1753 if self.op.uid_pool is not None:
1754 self.cluster.uid_pool = self.op.uid_pool
1755
1756 if self.op.default_iallocator is not None:
1757 self.cluster.default_iallocator = self.op.default_iallocator
1758
1759 if self.op.default_iallocator_params is not None:
1760 self.cluster.default_iallocator_params = self.op.default_iallocator_params
1761
1762 if self.op.reserved_lvs is not None:
1763 self.cluster.reserved_lvs = self.op.reserved_lvs
1764
1765 if self.op.use_external_mip_script is not None:
1766 self.cluster.use_external_mip_script = self.op.use_external_mip_script
1767
1768 if self.op.enabled_user_shutdown is not None and \
1769 self.cluster.enabled_user_shutdown != self.op.enabled_user_shutdown:
1770 self.cluster.enabled_user_shutdown = self.op.enabled_user_shutdown
1771 ensure_kvmd = True
1772
1773 def helper_os(aname, mods, desc):
1774 desc += " OS list"
1775 lst = getattr(self.cluster, aname)
1776 for key, val in mods:
1777 if key == constants.DDM_ADD:
1778 if val in lst:
1779 feedback_fn("OS %s already in %s, ignoring" % (val, desc))
1780 else:
1781 lst.append(val)
1782 elif key == constants.DDM_REMOVE:
1783 if val in lst:
1784 lst.remove(val)
1785 else:
1786 feedback_fn("OS %s not found in %s, ignoring" % (val, desc))
1787 else:
1788 raise errors.ProgrammerError("Invalid modification '%s'" % key)
1789
1790 if self.op.hidden_os:
1791 helper_os("hidden_os", self.op.hidden_os, "hidden")
1792
1793 if self.op.blacklisted_os:
1794 helper_os("blacklisted_os", self.op.blacklisted_os, "blacklisted")
1795
1796 if self.op.mac_prefix:
1797 self.cluster.mac_prefix = self.op.mac_prefix
1798
1799 if self.op.master_netdev:
1800 master_params = self.cfg.GetMasterNetworkParameters()
1801 ems = self.cfg.GetUseExternalMipScript()
1802 feedback_fn("Shutting down master ip on the current netdev (%s)" %
1803 self.cluster.master_netdev)
1804 result = self.rpc.call_node_deactivate_master_ip(master_params.uuid,
1805 master_params, ems)
1806 if not self.op.force:
1807 result.Raise("Could not disable the master ip")
1808 else:
1809 if result.fail_msg:
1810 msg = ("Could not disable the master ip (continuing anyway): %s" %
1811 result.fail_msg)
1812 feedback_fn(msg)
1813 feedback_fn("Changing master_netdev from %s to %s" %
1814 (master_params.netdev, self.op.master_netdev))
1815 self.cluster.master_netdev = self.op.master_netdev
1816
1817 if self.op.master_netmask:
1818 master_params = self.cfg.GetMasterNetworkParameters()
1819 feedback_fn("Changing master IP netmask to %s" % self.op.master_netmask)
1820 result = self.rpc.call_node_change_master_netmask(
1821 master_params.uuid, master_params.netmask,
1822 self.op.master_netmask, master_params.ip,
1823 master_params.netdev)
1824 result.Warn("Could not change the master IP netmask", feedback_fn)
1825 self.cluster.master_netmask = self.op.master_netmask
1826
1827 if self.op.install_image:
1828 self.cluster.install_image = self.op.install_image
1829
1830 if self.op.zeroing_image is not None:
1831 CheckImageValidity(self.op.zeroing_image,
1832 "Zeroing image must be an absolute path or a URL")
1833 self.cluster.zeroing_image = self.op.zeroing_image
1834
1835 self.cfg.Update(self.cluster, feedback_fn)
1836
1837 if self.op.master_netdev:
1838 master_params = self.cfg.GetMasterNetworkParameters()
1839 feedback_fn("Starting the master ip on the new master netdev (%s)" %
1840 self.op.master_netdev)
1841 ems = self.cfg.GetUseExternalMipScript()
1842 result = self.rpc.call_node_activate_master_ip(master_params.uuid,
1843 master_params, ems)
1844 result.Warn("Could not re-enable the master ip on the master,"
1845 " please restart manually", self.LogWarning)
1846
1847 # Even though 'self.op.enabled_user_shutdown' is being tested
1848 # above, the RPCs can only be done after 'self.cfg.Update' because
1849 # this will update the cluster object and sync 'Ssconf', and kvmd
1850 # uses 'Ssconf'.
1851 if ensure_kvmd:
1852 EnsureKvmdOnNodes(self, feedback_fn, silent_stop=stop_kvmd_silently)
1853
1854 if self.op.compression_tools is not None:
1855 self.cfg.SetCompressionTools(self.op.compression_tools)
1856
1857 if self.op.maint_round_delay is not None:
1858 self.cfg.SetMaintdRoundDelay(self.op.maint_round_delay)
1859
1860 if self.op.maint_balance is not None:
1861 self.cfg.SetMaintdBalance(self.op.maint_balance)
1862
1863 if self.op.maint_balance_threshold is not None:
1864 self.cfg.SetMaintdBalanceThreshold(self.op.maint_balance_threshold)
1865
1866 network_name = self.op.instance_communication_network
1867 if network_name is not None:
1868 return self._ModifyInstanceCommunicationNetwork(self.cfg,
1869 network_name, feedback_fn)
1870 else:
1871 return None