Check for same primary node before disk attachment
[ganeti-github.git] / lib / cmdlib / instance_set_params.py
1 #
2 #
3
4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc.
5 # All rights reserved.
6 #
7 # Redistribution and use in source and binary forms, with or without
8 # modification, are permitted provided that the following conditions are
9 # met:
10 #
11 # 1. Redistributions of source code must retain the above copyright notice,
12 # this list of conditions and the following disclaimer.
13 #
14 # 2. Redistributions in binary form must reproduce the above copyright
15 # notice, this list of conditions and the following disclaimer in the
16 # documentation and/or other materials provided with the distribution.
17 #
18 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
19 # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
20 # TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
21 # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
22 # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
23 # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
24 # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
25 # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
26 # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
27 # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28 # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
30 """Logical unit setting parameters of a single instance."""
31
32 import copy
33 import logging
34 import os
35
36 from ganeti import compat
37 from ganeti import constants
38 from ganeti import errors
39 from ganeti import ht
40 from ganeti import hypervisor
41 from ganeti import locking
42 from ganeti import netutils
43 from ganeti import objects
44 from ganeti import utils
45 import ganeti.rpc.node as rpc
46
47 from ganeti.cmdlib.base import LogicalUnit
48
49 from ganeti.cmdlib.common import INSTANCE_DOWN, \
50 INSTANCE_NOT_RUNNING, CheckNodeOnline, \
51 CheckParamsNotGlobal, \
52 IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
53 GetUpdatedParams, CheckInstanceState, ExpandNodeUuidAndName, \
54 IsValidDiskAccessModeCombination, AnnotateDiskParams
55 from ganeti.cmdlib.instance_storage import CalculateFileStorageDir, \
56 CheckDiskExtProvider, CheckNodesFreeDiskPerVG, CheckRADOSFreeSpace, \
57 CheckSpindlesExclusiveStorage, ComputeDiskSizePerVG, ComputeDisksInfo, \
58 CreateDisks, CreateSingleBlockDev, GenerateDiskTemplate, \
59 IsExclusiveStorageEnabledNodeUuid, ShutdownInstanceDisks, \
60 WaitForSync, WipeOrCleanupDisks, AssembleInstanceDisks
61 from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
62 NICToTuple, CheckNodeNotDrained, CopyLockList, \
63 ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
64 GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
65 UpdateMetadata, CheckForConflictingIp, \
66 PrepareContainerMods, ComputeInstanceCommunicationNIC, \
67 ApplyContainerMods, ComputeIPolicyInstanceSpecViolation, \
68 CheckNodesPhysicalCPUs
69 import ganeti.masterd.instance
70
71
class InstNicModPrivate(object):
  """Per-NIC modification state for L{LUInstanceSetParams}.

  Carries the raw requested NIC parameters and the fully-filled
  (cluster-default-merged) parameters between the preparation and
  application phases; both are set by C{_PrepareNicModification}.

  """
  def __init__(self):
    # Both fields start unset and are populated later by
    # _PrepareNicModification.
    self.params = self.filled = None
82
83 class LUInstanceSetParams(LogicalUnit):
84 """Modifies an instances's parameters.
85
86 """
87 HPATH = "instance-modify"
88 HTYPE = constants.HTYPE_INSTANCE
89 REQ_BGL = False
90
91 def GenericGetDiskInfo(self, uuid=None, name=None):
92 """Find a disk object using the provided params.
93
94 Accept arguments as keywords and use the GetDiskInfo/GetDiskInfoByName
95 config functions to retrieve the disk info based on these arguments.
96
97 In case of an error, raise the appropriate exceptions.
98 """
99 if uuid:
100 disk = self.cfg.GetDiskInfo(uuid)
101 if disk is None:
102 raise errors.OpPrereqError("No disk was found with this UUID: %s" %
103 uuid, errors.ECODE_INVAL)
104 elif name:
105 disk = self.cfg.GetDiskInfoByName(name)
106 if disk is None:
107 raise errors.OpPrereqError("No disk was found with this name: %s" %
108 name, errors.ECODE_INVAL)
109 else:
110 raise errors.ProgrammerError("No disk UUID or name was given")
111
112 return disk
113
  @staticmethod
  def _UpgradeDiskNicMods(kind, mods, verify_fn):
    """Upgrade legacy 2-tuple (op, params) mods to 3-tuples (op, index, params).

    @param kind: human-readable item kind ("disk" or "NIC"), used in error
        messages only
    @param mods: list of requested modifications; either all 2-tuples
        (legacy format) or all 3-tuples
    @param verify_fn: ht type predicate applied (via assert) to the upgraded
        result
    @return: the modifications in 3-tuple form
    @raise errors.OpPrereqError: if more than one add/attach/remove/detach
        operation is requested in the legacy format

    """
    assert ht.TList(mods)
    # The first entry determines the format; mixed formats are not expected
    assert not mods or len(mods[0]) in (2, 3)

    if mods and len(mods[0]) == 2:
      result = []

      addremove = 0
      for op, params in mods:
        if op in (constants.DDM_ADD, constants.DDM_ATTACH,
                  constants.DDM_REMOVE, constants.DDM_DETACH):
          # Legacy format carries no index; -1 presumably means
          # "last/append position" -- TODO confirm against ApplyContainerMods
          result.append((op, -1, params))
          addremove += 1

          if addremove > 1:
            raise errors.OpPrereqError("Only one %s add/attach/remove/detach "
                                       "operation is supported at a time" %
                                       kind, errors.ECODE_INVAL)
        else:
          # In legacy format any other "op" is actually the index of the
          # item being modified
          result.append((constants.DDM_MODIFY, op, params))

      assert verify_fn(result)
    else:
      result = mods
    return result
140
  @staticmethod
  def _CheckMods(kind, mods, key_types, item_fn):
    """Ensures requested disk/NIC modifications are valid.

    Note that the 'attach' action needs a way to refer to the UUID of the disk,
    since the disk name is not unique cluster-wide. However, the UUID of the
    disk is not settable but rather generated by Ganeti automatically,
    therefore it cannot be passed as an IDISK parameter. For this reason, this
    function will override the checks to accept uuid parameters solely for the
    attach action.

    @param kind: item kind for error messages (e.g. "disk" or "NIC")
    @param mods: list of (op, index, params) modifications to validate
    @param key_types: dict mapping allowed parameter names to type names; an
        empty dict skips type enforcement (used for 'ext' disk templates)
    @param item_fn: callback invoked as item_fn(op, params) for
        add/attach/modify operations to do kind-specific validation
    @raise errors.OpPrereqError: if parameters are passed to remove/detach
    @raise errors.ProgrammerError: on an unknown operation

    """
    # Create a key_types copy with the 'uuid' as a valid key type.
    key_types_attach = key_types.copy()
    key_types_attach['uuid'] = 'string'

    for (op, _, params) in mods:
      assert ht.TDict(params)

      # If 'key_types' is an empty dict, we assume we have an
      # 'ext' template and thus do not ForceDictType
      if key_types:
        utils.ForceDictType(params, (key_types if op != constants.DDM_ATTACH
                                     else key_types_attach))

      if op in (constants.DDM_REMOVE, constants.DDM_DETACH):
        # remove/detach identify the item by index only; extra settings
        # would be silently ignored, so reject them explicitly
        if params:
          raise errors.OpPrereqError("No settings should be passed when"
                                     " removing or detaching a %s" % kind,
                                     errors.ECODE_INVAL)
      elif op in (constants.DDM_ADD, constants.DDM_ATTACH,
                  constants.DDM_MODIFY):
        item_fn(op, params)
      else:
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
175
  def _VerifyDiskModification(self, op, params, excl_stor, group_access_types):
    """Verifies a disk modification.

    Normalizes C{params} in place (defaults, name=none handling) and raises
    on invalid combinations. Used as the item_fn callback of L{_CheckMods}.

    @param op: one of the DDM_* operations (add/attach/modify reach here)
    @param params: the IDISK parameter dict of the modification; mutated
    @param excl_stor: whether any instance node has exclusive storage enabled
    @param group_access_types: dict mapping disk template to the node group's
        default disk access type
    @raise errors.OpPrereqError: on any invalid modification

    """
    # Fall back to the instance's current template if no type was given
    disk_type = params.get(
        constants.IDISK_TYPE,
        self.cfg.GetInstanceDiskTemplate(self.instance.uuid))

    if op == constants.DDM_ADD:
      params[constants.IDISK_TYPE] = disk_type

      if disk_type == constants.DT_DISKLESS:
        raise errors.OpPrereqError(
            "Must specify disk type on diskless instance", errors.ECODE_INVAL)

      if disk_type != constants.DT_EXT:
        utils.ForceDictType(params, constants.IDISK_PARAMS_TYPES)

      mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
      if mode not in constants.DISK_ACCESS_SET:
        raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
                                   errors.ECODE_INVAL)

      size = params.get(constants.IDISK_SIZE, None)
      if size is None:
        raise errors.OpPrereqError("Required disk parameter '%s' missing" %
                                   constants.IDISK_SIZE, errors.ECODE_INVAL)
      size = int(size)

      params[constants.IDISK_SIZE] = size
      # name=none is the CLI way of requesting "no name"
      name = params.get(constants.IDISK_NAME, None)
      if name is not None and name.lower() == constants.VALUE_NONE:
        params[constants.IDISK_NAME] = None

    # This check is necessary both when adding and attaching disks
    if op in (constants.DDM_ADD, constants.DDM_ATTACH):
      CheckSpindlesExclusiveStorage(params, excl_stor, True)
      CheckDiskExtProvider(params, disk_type)

      # Make sure we do not add syncing disks to instances with inactive disks
      if not self.op.wait_for_sync and not self.instance.disks_active:
        raise errors.OpPrereqError("Can't %s a disk to an instance with"
                                   " deactivated disks and --no-wait-for-sync"
                                   " given" % op, errors.ECODE_INVAL)

      # Check disk access param (only for specific disks)
      if disk_type in constants.DTS_HAVE_ACCESS:
        access_type = params.get(constants.IDISK_ACCESS,
                                 group_access_types[disk_type])
        if not IsValidDiskAccessModeCombination(self.instance.hypervisor,
                                                disk_type, access_type):
          raise errors.OpPrereqError("Selected hypervisor (%s) cannot be"
                                     " used with %s disk access param" %
                                     (self.instance.hypervisor, access_type),
                                     errors.ECODE_STATE)

    if op == constants.DDM_ATTACH:
      # attach accepts exactly one identifier: either uuid or the disk name
      if len(params) != 1 or ('uuid' not in params and
                              constants.IDISK_NAME not in params):
        raise errors.OpPrereqError("Only one argument is permitted in %s op,"
                                   " either %s or uuid" % (constants.DDM_ATTACH,
                                                           constants.IDISK_NAME,
                                                           ),
                                   errors.ECODE_INVAL)
      self._CheckAttachDisk(params)

    elif op == constants.DDM_MODIFY:
      if constants.IDISK_SIZE in params:
        raise errors.OpPrereqError("Disk size change not possible, use"
                                   " grow-disk", errors.ECODE_INVAL)

      disk_info = self.cfg.GetInstanceDisks(self.instance.uuid)

      # Disk modification supports changing only the disk name and mode.
      # Changing arbitrary parameters is allowed only for ext disk template",
      if not utils.AllDiskOfType(disk_info, [constants.DT_EXT]):
        utils.ForceDictType(params, constants.MODIFIABLE_IDISK_PARAMS_TYPES)
      else:
        # We have to check that the 'access' and 'disk_provider' parameters
        # cannot be modified
        for param in [constants.IDISK_ACCESS, constants.IDISK_PROVIDER]:
          if param in params:
            raise errors.OpPrereqError("Disk '%s' parameter change is"
                                       " not possible" % param,
                                       errors.ECODE_INVAL)

      name = params.get(constants.IDISK_NAME, None)
      if name is not None and name.lower() == constants.VALUE_NONE:
        params[constants.IDISK_NAME] = None
265
  @staticmethod
  def _VerifyNicModification(op, params):
    """Verifies a network interface modification.

    Normalizes C{params} in place (name/network/ip "none" handling, MAC
    defaulting) and raises on invalid combinations. Used as the item_fn
    callback of L{_CheckMods}.

    @param op: one of the DDM_* operations (add/modify reach here)
    @param params: the INIC parameter dict of the modification; mutated
    @raise errors.OpPrereqError: on any invalid modification

    """
    if op in (constants.DDM_ADD, constants.DDM_MODIFY):
      ip = params.get(constants.INIC_IP, None)
      name = params.get(constants.INIC_NAME, None)
      req_net = params.get(constants.INIC_NETWORK, None)
      link = params.get(constants.NIC_LINK, None)
      mode = params.get(constants.NIC_MODE, None)
      # "none" is the CLI spelling of "clear this value"
      if name is not None and name.lower() == constants.VALUE_NONE:
        params[constants.INIC_NAME] = None
      if req_net is not None:
        if req_net.lower() == constants.VALUE_NONE:
          params[constants.INIC_NETWORK] = None
          req_net = None
        elif link is not None or mode is not None:
          # a network supplies its own link/mode, so these conflict
          raise errors.OpPrereqError("If network is given"
                                     " mode or link should not",
                                     errors.ECODE_INVAL)

      if op == constants.DDM_ADD:
        # new NICs default to an auto-generated MAC
        macaddr = params.get(constants.INIC_MAC, None)
        if macaddr is None:
          params[constants.INIC_MAC] = constants.VALUE_AUTO

      if ip is not None:
        if ip.lower() == constants.VALUE_NONE:
          params[constants.INIC_IP] = None
        else:
          if ip.lower() == constants.NIC_IP_POOL:
            # ip=pool means "allocate from the network's pool", which
            # requires a network on add
            if op == constants.DDM_ADD and req_net is None:
              raise errors.OpPrereqError("If ip=pool, parameter network"
                                         " cannot be none",
                                         errors.ECODE_INVAL)
          else:
            if not netutils.IPAddress.IsValid(ip):
              raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                         errors.ECODE_INVAL)

      if constants.INIC_MAC in params:
        macaddr = params[constants.INIC_MAC]
        if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
          macaddr = utils.NormalizeAndValidateMac(macaddr)

        if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
                                     " modifying an existing NIC",
                                     errors.ECODE_INVAL)
316
317 def _LookupDiskIndex(self, idx):
318 """Looks up uuid or name of disk if necessary."""
319 try:
320 return int(idx)
321 except ValueError:
322 pass
323 for i, d in enumerate(self.cfg.GetInstanceDisks(self.instance.uuid)):
324 if d.name == idx or d.uuid == idx:
325 return i
326 raise errors.OpPrereqError("Lookup of disk %r failed" % idx)
327
328 def _LookupDiskMods(self):
329 """Looks up uuid or name of disk if necessary."""
330 return [(op, self._LookupDiskIndex(idx), params)
331 for op, idx, params in self.op.disks]
332
  def CheckArguments(self):
    """Validate the opcode's arguments before any locks are taken.

    Upgrades legacy disk/NIC modification formats, verifies the NIC mods,
    sanity-checks disk template conversion arguments and expands the new
    primary node name, if given. Disk mods are verified later, in
    CheckPrereq, since they need the instance's disk template.

    @raise errors.OpPrereqError: if no change was requested at all, or an
        argument combination is invalid

    """
    # At least one thing must be changed, otherwise the opcode is pointless
    if not (self.op.nics or self.op.disks or self.op.disk_template or
            self.op.hvparams or self.op.beparams or self.op.os_name or
            self.op.osparams or self.op.offline is not None or
            self.op.runtime_mem or self.op.pnode or self.op.osparams_private or
            self.op.instance_communication is not None):
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)

    if self.op.hvparams:
      CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
                           "hypervisor", "instance", "cluster")

    self.op.disks = self._UpgradeDiskNicMods(
      "disk", self.op.disks,
      ht.TSetParamsMods(ht.TIDiskParams))
    self.op.nics = self._UpgradeDiskNicMods(
      "NIC", self.op.nics, ht.TSetParamsMods(ht.TINicParams))

    # Check disk template modifications
    if self.op.disk_template:
      if self.op.disks:
        raise errors.OpPrereqError("Disk template conversion and other disk"
                                   " changes not supported at the same time",
                                   errors.ECODE_INVAL)

      # mirrored template node checks
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        if not self.op.remote_node:
          raise errors.OpPrereqError("Changing the disk template to a mirrored"
                                     " one requires specifying a secondary"
                                     " node", errors.ECODE_INVAL)
      elif self.op.remote_node:
        self.LogWarning("Changing the disk template to a non-mirrored one,"
                        " the secondary node will be ignored")
        # the secondary node must be cleared in order to be ignored, otherwise
        # the operation will fail, in the GenerateDiskTemplate method
        self.op.remote_node = None

      # file-based template checks
      if self.op.disk_template in constants.DTS_FILEBASED:
        self._FillFileDriver()

    # Check NIC modifications
    self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
                    self._VerifyNicModification)

    if self.op.pnode:
      (self.op.pnode_uuid, self.op.pnode) = \
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
382
  def _CheckAttachDisk(self, params):
    """Check if disk can be attached to an instance.

    Check if the disk and instance have the same template. Also, check if the
    disk nodes are visible from the instance.

    @param params: attach parameters; contain either a 'uuid' key or a disk
        name under constants.IDISK_NAME
    @raise errors.OpPrereqError: if the templates or node sets don't match

    """
    uuid = params.get("uuid", None)
    name = params.get(constants.IDISK_NAME, None)

    disk = self.GenericGetDiskInfo(uuid, name)
    instance_template = self.cfg.GetInstanceDiskTemplate(self.instance.uuid)
    # A diskless instance accepts any disk template
    if (disk.dev_type != instance_template and
        instance_template != constants.DT_DISKLESS):
      raise errors.OpPrereqError("Instance has '%s' template while disk has"
                                 " '%s' template" %
                                 (instance_template, disk.dev_type),
                                 errors.ECODE_INVAL)

    instance_nodes = self.cfg.GetInstanceNodes(self.instance.uuid)
    # Make sure we do not attach disks to instances on wrong nodes. If the
    # instance is diskless, that instance is associated only to the primary
    # node, whereas the disk can be associated to two nodes in the case of DRBD,
    # hence, we have a subset check here.
    if disk.nodes and not set(instance_nodes).issubset(set(disk.nodes)):
      raise errors.OpPrereqError("Disk nodes are %s while the instance's nodes"
                                 " are %s" %
                                 (disk.nodes, instance_nodes),
                                 errors.ECODE_INVAL)
    # Make sure a DRBD disk has the same primary node as the instance where it
    # will be attached to.
    disk_primary = disk.GetPrimaryNode(self.instance.primary_node)
    if self.instance.primary_node != disk_primary:
      # NOTE(review): this raises OpExecError inside a prereq-style check;
      # OpPrereqError would match the surrounding checks -- confirm intent
      raise errors.OpExecError("The disks' primary node is %s whereas the "
                               "instance's primary node is %s."
                               % (disk_primary, self.instance.primary_node))
418
  def ExpandNames(self):
    """Declare the locks this LU needs.

    Locks the instance, its node groups (shared) and its nodes (exclusive);
    the node-group set is filled in optimistically in L{DeclareLocks}.

    """
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODEGROUP] = []
    # Can't even acquire node locks in shared mode as upcoming changes in
    # Ganeti 2.6 will start to modify the node object on disk conversion
    self.needed_locks[locking.LEVEL_NODE] = []
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
    # Look node group to look up the ipolicy
    self.share_locks[locking.LEVEL_NODEGROUP] = 1
    self.dont_collate_locks[locking.LEVEL_NODEGROUP] = True
    self.dont_collate_locks[locking.LEVEL_NODE] = True
    self.dont_collate_locks[locking.LEVEL_NODE_RES] = True
432
  def DeclareLocks(self, level):
    """Fill in the lock sets declared in L{ExpandNames}, per locking level.

    @param level: the locking level currently being acquired

    """
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
      # Acquire locks for the instance's nodegroups optimistically. Needs
      # to be verified in CheckPrereq
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
    elif level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
      # A disk template conversion to a mirrored template also needs the
      # new secondary node locked
      if self.op.disk_template and self.op.remote_node:
        (self.op.remote_node_uuid, self.op.remote_node) = \
          ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
                                self.op.remote_node)
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node_uuid)
    elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
451
  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, primary and secondaries.

    @rtype: dict
    @return: environment variables for the instance-modify hooks, including
        the changed backend parameters, the new NICs and, if requested, the
        new disk template and runtime memory

    """
    args = {}
    # Only export backend parameters that are actually being changed
    if constants.BE_MINMEM in self.be_new:
      args["minmem"] = self.be_new[constants.BE_MINMEM]
    if constants.BE_MAXMEM in self.be_new:
      args["maxmem"] = self.be_new[constants.BE_MAXMEM]
    if constants.BE_VCPUS in self.be_new:
      args["vcpus"] = self.be_new[constants.BE_VCPUS]
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
    # information at all.

    if self._new_nics is not None:
      nics = []

      for nic in self._new_nics:
        # deepcopy so filling in the defaults does not modify the config copy
        n = copy.deepcopy(nic)
        nicparams = self.cluster.SimpleFillNIC(n.nicparams)
        n.nicparams = nicparams
        nics.append(NICToTuple(self, n))

      args["nics"] = nics

    env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
    if self.op.disk_template:
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
    if self.op.runtime_mem:
      env["RUNTIME_MEMORY"] = self.op.runtime_mem

    return env
486
487 def BuildHooksNodes(self):
488 """Build hooks nodes.
489
490 """
491 nl = [self.cfg.GetMasterNode()] + \
492 list(self.cfg.GetInstanceNodes(self.instance.uuid))
493 return (nl, nl)
494
  def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
                              old_params, cluster, pnode_uuid):
    """Validate one NIC modification and prepare its final parameters.

    Resolves the target network, computes the fully-filled NIC parameters,
    checks the bridge/IP/MAC constraints and performs the MAC/IP
    reservations and releases in the configuration. The results are stored
    in C{private.params} and C{private.filled}.

    @param params: the requested INIC parameter changes; mutated (MAC/IP
        may be generated into it)
    @param private: L{InstNicModPrivate} receiving the computed parameters
    @param old_ip: the NIC's current IP address, or None
    @param old_net_uuid: the NIC's current network UUID, or None
    @param old_params: the NIC's current nicparams
    @param cluster: the cluster object, used to fill parameter defaults
    @param pnode_uuid: UUID of the instance's primary node
    @raise errors.OpPrereqError: on any invalid or unsatisfiable change

    """
    update_params_dict = dict([(key, params[key])
                               for key in constants.NICS_PARAMETERS
                               if key in params])

    req_link = update_params_dict.get(constants.NIC_LINK, None)
    req_mode = update_params_dict.get(constants.NIC_MODE, None)

    new_net_uuid = None
    # If no network change was requested, keep the old network
    new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
    if new_net_uuid_or_name:
      new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
      new_net_obj = self.cfg.GetNetwork(new_net_uuid)

    if old_net_uuid:
      old_net_obj = self.cfg.GetNetwork(old_net_uuid)

    if new_net_uuid:
      # A connected network dictates the NIC parameters via the node group
      netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode_uuid)
      if not netparams:
        raise errors.OpPrereqError("No netparams found for the network"
                                   " %s, probably not connected" %
                                   new_net_obj.name, errors.ECODE_INVAL)
      new_params = dict(netparams)
    else:
      new_params = GetUpdatedParams(old_params, update_params_dict)

    utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)

    new_filled_params = cluster.SimpleFillNIC(new_params)
    objects.NIC.CheckParameterSyntax(new_filled_params)

    new_mode = new_filled_params[constants.NIC_MODE]
    if new_mode == constants.NIC_MODE_BRIDGED:
      # The bridge must exist on the primary node; with --force this is
      # downgraded to a warning
      bridge = new_filled_params[constants.NIC_LINK]
      msg = self.rpc.call_bridges_exist(pnode_uuid, [bridge]).fail_msg
      if msg:
        msg = "Error checking bridges on node '%s': %s" % \
                (self.cfg.GetNodeName(pnode_uuid), msg)
        if self.op.force:
          self.warn.append(msg)
        else:
          raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)

    elif new_mode == constants.NIC_MODE_ROUTED:
      ip = params.get(constants.INIC_IP, old_ip)
      if ip is None and not new_net_uuid:
        raise errors.OpPrereqError("Cannot set the NIC IP address to None"
                                   " on a routed NIC if not attached to a"
                                   " network", errors.ECODE_INVAL)

    elif new_mode == constants.NIC_MODE_OVS:
      # TODO: check OVS link
      self.LogInfo("OVS links are currently not checked for correctness")

    if constants.INIC_MAC in params:
      mac = params[constants.INIC_MAC]
      if mac is None:
        raise errors.OpPrereqError("Cannot unset the NIC MAC address",
                                   errors.ECODE_INVAL)
      elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        # otherwise generate the MAC address
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
      else:
        # or validate/reserve the current one
        try:
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("MAC address '%s' already in use"
                                     " in cluster" % mac,
                                     errors.ECODE_NOTUNIQUE)
    elif new_net_uuid != old_net_uuid:
      # No explicit MAC given, but the network changed: regenerate the MAC
      # if the two networks use different MAC prefixes

      def get_net_prefix(net_uuid):
        mac_prefix = None
        if net_uuid:
          nobj = self.cfg.GetNetwork(net_uuid)
          mac_prefix = nobj.mac_prefix

        return mac_prefix

      new_prefix = get_net_prefix(new_net_uuid)
      old_prefix = get_net_prefix(old_net_uuid)
      if old_prefix != new_prefix:
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())

    # if there is a change in (ip, network) tuple
    new_ip = params.get(constants.INIC_IP, old_ip)
    if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
      if new_ip:
        # if IP is pool then require a network and generate one IP
        if new_ip.lower() == constants.NIC_IP_POOL:
          if new_net_uuid:
            try:
              new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP"
                                         " from the address pool",
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s",
                         new_ip,
                         new_net_obj.name)
            params[constants.INIC_IP] = new_ip
          else:
            raise errors.OpPrereqError("ip=pool, but no network found",
                                       errors.ECODE_INVAL)
        # Reserve new IP if in the new network if any
        elif new_net_uuid:
          try:
            self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId(),
                               check=self.op.conflicts_check)
            self.LogInfo("Reserving IP %s in network %s",
                         new_ip, new_net_obj.name)
          except errors.ReservationError:
            raise errors.OpPrereqError("IP %s not available in network %s" %
                                       (new_ip, new_net_obj.name),
                                       errors.ECODE_NOTUNIQUE)
        # new network is None so check if new IP is a conflicting IP
        elif self.op.conflicts_check:
          CheckForConflictingIp(self, new_ip, pnode_uuid)

      # release old IP if old network is not None
      if old_ip and old_net_uuid:
        try:
          self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
        except errors.AddressPoolError:
          # best-effort: the release failing only warrants a warning
          logging.warning("Release IP %s not contained in network %s",
                          old_ip, old_net_obj.name)

    # there are no changes in (ip, network) tuple and old network is not None
    elif (old_net_uuid is not None and
          (req_link is not None or req_mode is not None)):
      raise errors.OpPrereqError("Not allowed to change link or mode of"
                                 " a NIC that is connected to a network",
                                 errors.ECODE_INVAL)

    private.params = new_params
    private.filled = new_filled_params
637
  def _PreCheckDiskTemplate(self, pnode_info):
    """CheckPrereq checks related to a new disk template.

    Validates that the requested conversion is supported and allowed,
    verifies the secondary node for mirrored templates, checks exclusive
    storage and node capacity, and computes C{self.disks_info}.

    @param pnode_info: node object of the instance's primary node, passed
        in to avoid configuration lookups
    @raise errors.OpPrereqError: if the conversion is invalid or the
        cluster/nodes cannot support it

    """
    # Arguments are passed to avoid configuration lookups
    pnode_uuid = self.instance.primary_node

    # TODO make sure heterogeneous disk types can be converted.
    disk_template = self.cfg.GetInstanceDiskTemplate(self.instance.uuid)
    if disk_template == constants.DT_MIXED:
      # NOTE(review): this OpPrereqError lacks an errors.ECODE_* reason code,
      # unlike the other raises in this method -- confirm
      raise errors.OpPrereqError(
          "Conversion from mixed is not yet supported.")

    inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
    if utils.AnyDiskOfType(inst_disks, constants.DTS_NOT_CONVERTIBLE_FROM):
      raise errors.OpPrereqError(
          "Conversion from the '%s' disk template is not supported"
          % self.cfg.GetInstanceDiskTemplate(self.instance.uuid),
          errors.ECODE_INVAL)

    elif self.op.disk_template in constants.DTS_NOT_CONVERTIBLE_TO:
      raise errors.OpPrereqError("Conversion to the '%s' disk template is"
                                 " not supported" % self.op.disk_template,
                                 errors.ECODE_INVAL)

    # Converting to the current template is a no-op, except for 'ext'
    # (different providers may be involved)
    if (self.op.disk_template != constants.DT_EXT and
        utils.AllDiskOfType(inst_disks, [self.op.disk_template])):
      raise errors.OpPrereqError("Instance already has disk template %s" %
                                 self.op.disk_template, errors.ECODE_INVAL)

    if not self.cluster.IsDiskTemplateEnabled(self.op.disk_template):
      enabled_dts = utils.CommaJoin(self.cluster.enabled_disk_templates)
      raise errors.OpPrereqError("Disk template '%s' is not enabled for this"
                                 " cluster (enabled templates: %s)" %
                                 (self.op.disk_template, enabled_dts),
                                 errors.ECODE_STATE)

    default_vg = self.cfg.GetVGName()
    if (not default_vg and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Disk template conversions to lvm-based"
                                 " instances are not supported by the cluster",
                                 errors.ECODE_STATE)

    CheckInstanceState(self, self.instance, INSTANCE_DOWN,
                       msg="cannot change disk template")

    # compute new disks' information
    self.disks_info = ComputeDisksInfo(inst_disks, self.op.disk_template,
                                       default_vg, self.op.ext_params)

    # mirror node verification
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.remote_node_uuid == pnode_uuid:
        raise errors.OpPrereqError("Given new secondary node %s is the same"
                                   " as the primary node of the instance" %
                                   self.op.remote_node, errors.ECODE_STATE)
      CheckNodeOnline(self, self.op.remote_node_uuid)
      CheckNodeNotDrained(self, self.op.remote_node_uuid)
      CheckNodeVmCapable(self, self.op.remote_node_uuid)

      snode_info = self.cfg.GetNodeInfo(self.op.remote_node_uuid)
      snode_group = self.cfg.GetNodeGroup(snode_info.group)
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                              snode_group)
      CheckTargetNodeIPolicy(self, ipolicy, self.instance, snode_info, self.cfg,
                             ignore=self.op.ignore_ipolicy)
      if pnode_info.group != snode_info.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    # check that the template is in the primary node group's allowed templates
    pnode_group = self.cfg.GetNodeGroup(pnode_info.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                            pnode_group)
    allowed_dts = ipolicy[constants.IPOLICY_DTS]
    if self.op.disk_template not in allowed_dts:
      raise errors.OpPrereqError("Disk template '%s' in not allowed (allowed"
                                 " templates: %s)" %
                                 (self.op.disk_template,
                                  utils.CommaJoin(allowed_dts)),
                                 errors.ECODE_STATE)

    if not self.op.disk_template in constants.DTS_EXCL_STORAGE:
      # Make sure none of the nodes require exclusive storage
      nodes = [pnode_info]
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        assert snode_info
        nodes.append(snode_info)
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
      if compat.any(map(has_es, nodes)):
        errmsg = ("Cannot convert disk template from %s to %s when exclusive"
                  " storage is enabled" % (
                      self.cfg.GetInstanceDiskTemplate(self.instance.uuid),
                      self.op.disk_template))
        raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)

    # TODO remove setting the disk template after DiskSetParams exists.
    # node capacity checks
    if (self.op.disk_template == constants.DT_PLAIN and
        utils.AllDiskOfType(inst_disks, [constants.DT_DRBD8])):
      # we ensure that no capacity checks will be made for conversions from
      # the 'drbd' to the 'plain' disk template
      pass
    elif (self.op.disk_template == constants.DT_DRBD8 and
          utils.AllDiskOfType(inst_disks, [constants.DT_PLAIN])):
      # for conversions from the 'plain' to the 'drbd' disk template, check
      # only the remote node's capacity
      req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks_info)
      CheckNodesFreeDiskPerVG(self, [self.op.remote_node_uuid], req_sizes)
    elif self.op.disk_template in constants.DTS_LVM:
      # rest lvm-based capacity checks
      node_uuids = [pnode_uuid]
      if self.op.remote_node_uuid:
        node_uuids.append(self.op.remote_node_uuid)
      req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks_info)
      CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes)
    elif self.op.disk_template == constants.DT_RBD:
      # CheckRADOSFreeSpace() is simply a placeholder
      CheckRADOSFreeSpace()
    elif self.op.disk_template == constants.DT_EXT:
      # FIXME: Capacity checks for extstorage template, if exists
      pass
    else:
      # FIXME: Checks about other non lvm-based disk templates
      pass
763
  def _PreCheckDisks(self, ispec):
    """CheckPrereq checks related to disk changes.

    Verifies the requested disk modifications against the instance's disks
    and node group, prepares C{self.diskmod}, and validates the resulting
    disk set (names, count). Also checks the online/offline transition.

    @type ispec: dict
    @param ispec: instance specs to be updated with the new disks

    """
    self.diskparams = self.cfg.GetInstanceDiskParams(self.instance)

    # Exclusive storage matters if it is enabled on *any* instance node
    inst_nodes = self.cfg.GetInstanceNodes(self.instance.uuid)
    excl_stor = compat.any(
      rpc.GetExclusiveStorageForNodes(self.cfg, inst_nodes).values()
      )

    # Get the group access type
    node_info = self.cfg.GetNodeInfo(self.instance.primary_node)
    node_group = self.cfg.GetNodeGroup(node_info.group)
    group_disk_params = self.cfg.GetGroupDiskParams(node_group)

    group_access_types = dict(
      (dt, group_disk_params[dt].get(
          constants.RBD_ACCESS, constants.DISK_KERNELSPACE))
      for dt in constants.DISK_TEMPLATES)

    # Check disk modifications. This is done here and not in CheckArguments
    # (as with NICs), because we need to know the instance's disk template
    ver_fn = lambda op, par: self._VerifyDiskModification(op, par, excl_stor,
                                                          group_access_types)
    # Don't enforce param types here in case it's an ext disk added. The check
    # happens inside _VerifyDiskModification.
    self._CheckMods("disk", self.op.disks, {}, ver_fn)

    self.diskmod = PrepareContainerMods(self.op.disks, None)

    # Callback applying a single mod onto the copied disk objects below
    def _PrepareDiskMod(_, disk, params, __):
      disk.name = params.get(constants.IDISK_NAME, None)

    # Verify disk changes (operating on a copy)
    inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
    disks = copy.deepcopy(inst_disks)
    ApplyContainerMods("disk", disks, None, self.diskmod, None, None,
                       _PrepareDiskMod, None, None)
    utils.ValidateDeviceNames("disk", disks)
    if len(disks) > constants.MAX_DISKS:
      raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
                                 " more" % constants.MAX_DISKS,
                                 errors.ECODE_STATE)
    disk_sizes = [disk.size for disk in inst_disks]
    disk_sizes.extend(params["size"] for (op, idx, params, private) in
                      self.diskmod if op == constants.DDM_ADD)
    ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
    ispec[constants.ISPEC_DISK_SIZE] = disk_sizes

    # either --online or --offline was passed
    if self.op.offline is not None:
      if self.op.offline:
        msg = "can't change to offline without being down first"
      else:
        msg = "can't change to online (down) without being offline first"
      CheckInstanceState(self, self.instance, INSTANCE_NOT_RUNNING,
                         msg=msg)
825
826 @staticmethod
827 def _InstanceCommunicationDDM(cfg, instance_communication, instance):
828 """Create a NIC mod that adds or removes the instance
829 communication NIC to a running instance.
830
831 The NICS are dynamically created using the Dynamic Device
832 Modification (DDM). This function produces a NIC modification
833 (mod) that inserts an additional NIC meant for instance
834 communication in or removes an existing instance communication NIC
835 from a running instance, using DDM.
836
837 @type cfg: L{config.ConfigWriter}
838 @param cfg: cluster configuration
839
840 @type instance_communication: boolean
841 @param instance_communication: whether instance communication is
842 enabled or disabled
843
844 @type instance: L{objects.Instance}
845 @param instance: instance to which the NIC mod will be applied to
846
847 @rtype: (L{constants.DDM_ADD}, -1, parameters) or
848 (L{constants.DDM_REMOVE}, -1, parameters) or
849 L{None}
850 @return: DDM mod containing an action to add or remove the NIC, or
851 None if nothing needs to be done
852
853 """
854 nic_name = ComputeInstanceCommunicationNIC(instance.name)
855
856 instance_communication_nic = None
857
858 for nic in instance.nics:
859 if nic.name == nic_name:
860 instance_communication_nic = nic
861 break
862
863 if instance_communication and not instance_communication_nic:
864 action = constants.DDM_ADD
865 params = {constants.INIC_NAME: nic_name,
866 constants.INIC_MAC: constants.VALUE_GENERATE,
867 constants.INIC_IP: constants.NIC_IP_POOL,
868 constants.INIC_NETWORK:
869 cfg.GetInstanceCommunicationNetwork()}
870 elif not instance_communication and instance_communication_nic:
871 action = constants.DDM_REMOVE
872 params = None
873 else:
874 action = None
875 params = None
876
877 if action is not None:
878 return (action, -1, params)
879 else:
880 return None
881
882 def _GetInstanceInfo(self, cluster_hvparams):
883 pnode_uuid = self.instance.primary_node
884 instance_info = self.rpc.call_instance_info(
885 pnode_uuid, self.instance.name, self.instance.hypervisor,
886 cluster_hvparams)
887 return instance_info
888
889 def _CheckHotplug(self):
890 if self.op.hotplug or self.op.hotplug_if_possible:
891 result = self.rpc.call_hotplug_supported(self.instance.primary_node,
892 self.instance)
893 if result.fail_msg:
894 if self.op.hotplug:
895 result.Raise("Hotplug is not possible: %s" % result.fail_msg,
896 prereq=True, ecode=errors.ECODE_STATE)
897 else:
898 self.LogWarning(result.fail_msg)
899 self.op.hotplug = False
900 self.LogInfo("Modification will take place without hotplugging.")
901 else:
902 self.op.hotplug = True
903
904 def _PrepareNicCommunication(self):
905 # add or remove NIC for instance communication
906 if self.op.instance_communication is not None:
907 mod = self._InstanceCommunicationDDM(self.cfg,
908 self.op.instance_communication,
909 self.instance)
910 if mod is not None:
911 self.op.nics.append(mod)
912
913 self.nicmod = PrepareContainerMods(self.op.nics, InstNicModPrivate)
914
915 def _ProcessHVParams(self, node_uuids):
916 if self.op.hvparams:
917 hv_type = self.instance.hypervisor
918 i_hvdict = GetUpdatedParams(self.instance.hvparams, self.op.hvparams)
919 utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
920 hv_new = self.cluster.SimpleFillHV(hv_type, self.instance.os, i_hvdict)
921
922 # local check
923 hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
924 CheckHVParams(self, node_uuids, self.instance.hypervisor, hv_new)
925 self.hv_proposed = self.hv_new = hv_new # the new actual values
926 self.hv_inst = i_hvdict # the new dict (without defaults)
927 else:
928 self.hv_proposed = self.cluster.SimpleFillHV(self.instance.hypervisor,
929 self.instance.os,
930 self.instance.hvparams)
931 self.hv_new = self.hv_inst = {}
932
933 def _ProcessBeParams(self):
934 if self.op.beparams:
935 i_bedict = GetUpdatedParams(self.instance.beparams, self.op.beparams,
936 use_none=True)
937 objects.UpgradeBeParams(i_bedict)
938 utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
939 be_new = self.cluster.SimpleFillBE(i_bedict)
940 self.be_proposed = self.be_new = be_new # the new actual values
941 self.be_inst = i_bedict # the new dict (without defaults)
942 else:
943 self.be_new = self.be_inst = {}
944 self.be_proposed = self.cluster.SimpleFillBE(self.instance.beparams)
945 return self.cluster.FillBE(self.instance)
946
  def _ValidateCpuParams(self):
    """Validate the consistency of the CPU mask with the vCPU count.

    Run on every modification so that all combinations of a changed CPU
    mask and/or vCPU count are covered.

    @raise errors.OpPrereqError: if the CPU mask has more than one entry
        and its length does not match the proposed number of vCPUs

    """
    # CPU param validation -- checking every time a parameter is
    # changed to cover all cases where either CPU mask or vcpus have
    # changed
    if (constants.BE_VCPUS in self.be_proposed and
        constants.HV_CPU_MASK in self.hv_proposed):
      cpu_list = \
        utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
      # Verify mask is consistent with number of vCPUs. Can skip this
      # test if only 1 entry in the CPU mask, which means same mask
      # is applied to all vCPUs.
      if (len(cpu_list) > 1 and
          len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
        raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
                                   " CPU mask [%s]" %
                                   (self.be_proposed[constants.BE_VCPUS],
                                    self.hv_proposed[constants.HV_CPU_MASK]),
                                   errors.ECODE_INVAL)

      # Only perform this test if a new CPU mask is given
      if constants.HV_CPU_MASK in self.hv_new and cpu_list:
        # Calculate the largest CPU number requested
        max_requested_cpu = max(map(max, cpu_list))
        # Check that all of the instance's nodes have enough physical CPUs to
        # satisfy the requested CPU mask
        hvspecs = [(self.instance.hypervisor,
                    self.cfg.GetClusterInfo()
                    .hvparams[self.instance.hypervisor])]
        CheckNodesPhysicalCPUs(self,
                               self.cfg.GetInstanceNodes(self.instance.uuid),
                               max_requested_cpu + 1,
                               hvspecs)
979
980 def _ProcessOsParams(self, node_uuids):
981 # osparams processing
982 instance_os = (self.op.os_name
983 if self.op.os_name and not self.op.force
984 else self.instance.os)
985
986 if self.op.osparams or self.op.osparams_private:
987 public_parms = self.op.osparams or {}
988 private_parms = self.op.osparams_private or {}
989 dupe_keys = utils.GetRepeatedKeys(public_parms, private_parms)
990
991 if dupe_keys:
992 raise errors.OpPrereqError("OS parameters repeated multiple times: %s" %
993 utils.CommaJoin(dupe_keys))
994
995 self.os_inst = GetUpdatedParams(self.instance.osparams,
996 public_parms)
997 self.os_inst_private = GetUpdatedParams(self.instance.osparams_private,
998 private_parms)
999
1000 CheckOSParams(self, True, node_uuids, instance_os,
1001 objects.FillDict(self.os_inst,
1002 self.os_inst_private),
1003 self.op.force_variant)
1004
1005 else:
1006 self.os_inst = {}
1007 self.os_inst_private = {}
1008
  def _ProcessMem(self, cluster_hvparams, be_old, pnode_uuid):
    """Memory-related prerequisite checks.

    When the maximum memory is raised (and --force is not given), checks
    that the primary node still has enough free memory to start the
    instance, and -- for auto-balanced instances -- that each secondary
    could still host it on failover.  When a runtime memory change is
    requested, checks the instance is running and the new value is within
    the min/max limits and satisfiable on the primary node.

    @param cluster_hvparams: cluster-level hypervisor parameters for the
        instance's hypervisor
    @param be_old: the instance's old, fully-filled backend parameters
    @param pnode_uuid: UUID of the instance's primary node

    @raise errors.OpPrereqError: if the memory change cannot be satisfied

    """
    #TODO(dynmem): do the appropriate check involving MINMEM
    if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
        self.be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
      mem_check_list = [pnode_uuid]
      if self.be_new[constants.BE_AUTO_BALANCE]:
        # either we changed auto_balance to yes or it was from before
        mem_check_list.extend(
          self.cfg.GetInstanceSecondaryNodes(self.instance.uuid))
      instance_info = self._GetInstanceInfo(cluster_hvparams)
      hvspecs = [(self.instance.hypervisor,
                  cluster_hvparams)]
      nodeinfo = self.rpc.call_node_info(mem_check_list, None,
                                         hvspecs)
      pninfo = nodeinfo[pnode_uuid]
      msg = pninfo.fail_msg
      if msg:
        # Assume the primary node is unreachable and go ahead
        self.warn.append("Can't get info from primary node %s: %s" %
                         (self.cfg.GetNodeName(pnode_uuid), msg))
      else:
        (_, _, (pnhvinfo, )) = pninfo.payload
        if not isinstance(pnhvinfo.get("memory_free", None), int):
          self.warn.append("Node data from primary node %s doesn't contain"
                           " free memory information" %
                           self.cfg.GetNodeName(pnode_uuid))
        elif instance_info.fail_msg:
          self.warn.append("Can't get instance runtime information: %s" %
                           instance_info.fail_msg)
        else:
          if instance_info.payload:
            current_mem = int(instance_info.payload["memory"])
          else:
            # Assume instance not running
            # (there is a slight race condition here, but it's not very
            # probable, and we have no other way to check)
            # TODO: Describe race condition
            current_mem = 0
          #TODO(dynmem): do the appropriate check involving MINMEM
          # only the memory delta beyond what the instance already uses
          # must be free on the primary node
          miss_mem = (self.be_new[constants.BE_MAXMEM] - current_mem -
                      pnhvinfo["memory_free"])
          if miss_mem > 0:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from starting, due to %d MB of memory"
                                       " missing on its primary node" %
                                       miss_mem, errors.ECODE_NORES)

      if self.be_new[constants.BE_AUTO_BALANCE]:
        # auto-balanced instances must be able to fail over, so failures
        # on secondary nodes are hard errors here (unlike the primary,
        # where unreachable nodes only produce warnings above)
        secondary_nodes = \
          self.cfg.GetInstanceSecondaryNodes(self.instance.uuid)
        for node_uuid, nres in nodeinfo.items():
          if node_uuid not in secondary_nodes:
            continue
          nres.Raise("Can't get info from secondary node %s" %
                     self.cfg.GetNodeName(node_uuid), prereq=True,
                     ecode=errors.ECODE_STATE)
          (_, _, (nhvinfo, )) = nres.payload
          if not isinstance(nhvinfo.get("memory_free", None), int):
            raise errors.OpPrereqError("Secondary node %s didn't return free"
                                       " memory information" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)
          #TODO(dynmem): do the appropriate check involving MINMEM
          elif self.be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from failover to its secondary node"
                                       " %s, due to not enough memory" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)

    if self.op.runtime_mem:
      # runtime (ballooning) memory change: the instance must be running
      remote_info = self.rpc.call_instance_info(
         self.instance.primary_node, self.instance.name,
         self.instance.hypervisor,
         cluster_hvparams)
      remote_info.Raise("Error checking node %s" %
                        self.cfg.GetNodeName(self.instance.primary_node),
                        prereq=True)
      if not remote_info.payload: # not running already
        raise errors.OpPrereqError("Instance %s is not running" %
                                   self.instance.name, errors.ECODE_STATE)

      current_memory = remote_info.payload["memory"]
      if (not self.op.force and
          (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
           self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
        raise errors.OpPrereqError("Instance %s must have memory between %d"
                                   " and %d MB of memory unless --force is"
                                   " given" %
                                   (self.instance.name,
                                    self.be_proposed[constants.BE_MINMEM],
                                    self.be_proposed[constants.BE_MAXMEM]),
                                   errors.ECODE_INVAL)

      delta = self.op.runtime_mem - current_memory
      if delta > 0:
        # growing: the primary node must have the extra memory available
        CheckNodeFreeMemory(
            self, self.instance.primary_node,
            "ballooning memory for instance %s" % self.instance.name, delta,
            self.instance.hypervisor,
            self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])
1110
  def CheckPrereq(self):
    """Check prerequisites.

    Validates every requested modification (NICs, disks, hypervisor,
    backend and OS parameters, memory) against the current cluster state
    and the instance policy, and precomputes the changes to be applied
    in Exec.

    """
    assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    self.cluster = self.cfg.GetClusterInfo()
    cluster_hvparams = self.cluster.hvparams[self.instance.hypervisor]

    self.op.disks = self._LookupDiskMods()

    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    # non-fatal warnings accumulated by the checks below (e.g. in
    # _ProcessMem)
    self.warn = []

    # Changing the primary node requires the instance to be down, unless
    # --force is given; best-effort check (only a warning if the runtime
    # information cannot be obtained)
    if (self.op.pnode_uuid is not None and
        self.op.pnode_uuid != self.instance.primary_node and
        not self.op.force):
      instance_info = self._GetInstanceInfo(cluster_hvparams)

      if instance_info.fail_msg:
        self.warn.append("Can't get instance runtime information: %s" %
                         instance_info.fail_msg)
      elif instance_info.payload:
        raise errors.OpPrereqError(
          "Instance is still running on %s" %
          self.cfg.GetNodeName(self.instance.primary_node),
          errors.ECODE_STATE)
    pnode_uuid = self.instance.primary_node
    assert pnode_uuid in self.owned_locks(locking.LEVEL_NODE)

    node_uuids = list(self.cfg.GetInstanceNodes(self.instance.uuid))
    pnode_info = self.cfg.GetNodeInfo(pnode_uuid)

    assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
    group_info = self.cfg.GetNodeGroup(pnode_info.group)

    # dictionary with instance information after the modification
    ispec = {}

    self._CheckHotplug()

    self._PrepareNicCommunication()

    # disks processing
    assert not (self.op.disk_template and self.op.disks), \
      "Can't modify disk template and apply disk changes at the same time"

    if self.op.disk_template:
      self._PreCheckDiskTemplate(pnode_info)

    self._PreCheckDisks(ispec)

    self._ProcessHVParams(node_uuids)
    be_old = self._ProcessBeParams()

    self._ValidateCpuParams()
    self._ProcessOsParams(node_uuids)
    self._ProcessMem(cluster_hvparams, be_old, pnode_uuid)

    # make self.cluster visible in the functions below
    cluster = self.cluster

    def _PrepareNicCreate(_, params, private):
      self._PrepareNicModification(params, private, None, None,
                                   {}, cluster, pnode_uuid)
      return (None, None)

    def _PrepareNicAttach(_, __, ___):
      raise errors.OpPrereqError("Attach operation is not supported for NICs",
                                 errors.ECODE_INVAL)

    def _PrepareNicMod(_, nic, params, private):
      self._PrepareNicModification(params, private, nic.ip, nic.network,
                                   nic.nicparams, cluster, pnode_uuid)
      return None

    def _PrepareNicRemove(_, params, __):
      # give a possibly pool-allocated IP back to its network
      ip = params.ip
      net = params.network
      if net is not None and ip is not None:
        self.cfg.ReleaseIp(net, ip, self.proc.GetECId())

    def _PrepareNicDetach(_, __, ___):
      raise errors.OpPrereqError("Detach operation is not supported for NICs",
                                 errors.ECODE_INVAL)

    # Verify NIC changes (operating on copy)
    nics = [nic.Copy() for nic in self.instance.nics]
    ApplyContainerMods("NIC", nics, None, self.nicmod, _PrepareNicCreate,
                       _PrepareNicAttach, _PrepareNicMod, _PrepareNicRemove,
                       _PrepareNicDetach)
    if len(nics) > constants.MAX_NICS:
      raise errors.OpPrereqError("Instance has too many network interfaces"
                                 " (%d), cannot add more" % constants.MAX_NICS,
                                 errors.ECODE_STATE)

    # Pre-compute NIC changes (necessary to use result in hooks)
    self._nic_chgdesc = []
    if self.nicmod:
      # Operate on copies as this is still in prereq
      nics = [nic.Copy() for nic in self.instance.nics]
      ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
                         self._CreateNewNic, None, self._ApplyNicMods,
                         self._RemoveNic, None)
      # Verify that NIC names are unique and valid
      utils.ValidateDeviceNames("NIC", nics)
      self._new_nics = nics
      ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
    else:
      self._new_nics = None
      ispec[constants.ISPEC_NIC_COUNT] = len(self.instance.nics)

    if not self.op.ignore_ipolicy:
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                              group_info)

      # Fill ispec with backend parameters
      ispec[constants.ISPEC_SPINDLE_USE] = \
        self.be_new.get(constants.BE_SPINDLE_USE, None)
      ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
                                                         None)

      # Copy ispec to verify parameters with min/max values separately
      if self.op.disk_template:
        count = ispec[constants.ISPEC_DISK_COUNT]
        new_disk_types = [self.op.disk_template] * count
      else:
        old_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
        add_disk_count = ispec[constants.ISPEC_DISK_COUNT] - len(old_disks)
        dev_type = self.cfg.GetInstanceDiskTemplate(self.instance.uuid)
        if dev_type == constants.DT_DISKLESS and add_disk_count != 0:
          raise errors.ProgrammerError(
            "Conversion from diskless instance not possible and should have"
            " been caught")

        new_disk_types = ([d.dev_type for d in old_disks] +
                          [dev_type] * add_disk_count)
      # check the policy once with MAXMEM and once with MINMEM, as both
      # must fit
      ispec_max = ispec.copy()
      ispec_max[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MAXMEM, None)
      res_max = ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
                                                    new_disk_types)
      ispec_min = ispec.copy()
      ispec_min[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MINMEM, None)
      res_min = ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
                                                    new_disk_types)

      if (res_max or res_min):
        # FIXME: Improve error message by including information about whether
        # the upper or lower limit of the parameter fails the ipolicy.
        msg = ("Instance allocation to group %s (%s) violates policy: %s" %
               (group_info, group_info.name,
                utils.CommaJoin(set(res_max + res_min))))
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
1270
  def _ConvertInstanceDisks(self, feedback_fn):
    """Converts the disks of an instance to another type.

    This function converts the disks of an instance. It supports
    conversions among all the available disk types except conversions
    between the LVM-based disk types, that use their separate code path.
    Also, this method does not support conversions that include the 'diskless'
    template and those targeting the 'blockdev' template.

    The flow is: generate new disk objects, create the empty block
    devices, copy the data device-by-device (rolling back on failure),
    swap the disks in the configuration, wait for sync and finally
    remove the old block devices.

    @type feedback_fn: callable
    @param feedback_fn: function used to send feedback back to the caller

    @rtype: NoneType
    @return: None
    @raise errors.OpPrereqError: in case of failure

    """
    template_info = self.op.disk_template
    if self.op.disk_template == constants.DT_EXT:
      # for 'ext' also report the external provider in messages
      template_info = ":".join([self.op.disk_template,
                                self.op.ext_params["provider"]])

    old_template = self.cfg.GetInstanceDiskTemplate(self.instance.uuid)
    feedback_fn("Converting disk template from '%s' to '%s'" %
                (old_template, template_info))

    assert not (old_template in constants.DTS_NOT_CONVERTIBLE_FROM or
                self.op.disk_template in constants.DTS_NOT_CONVERTIBLE_TO), \
      ("Unsupported disk template conversion from '%s' to '%s'" %
       (old_template, self.op.disk_template))

    pnode_uuid = self.instance.primary_node
    snode_uuid = []
    if self.op.remote_node_uuid:
      snode_uuid = [self.op.remote_node_uuid]

    old_disks = self.cfg.GetInstanceDisks(self.instance.uuid)

    feedback_fn("Generating new '%s' disk template..." % template_info)
    file_storage_dir = CalculateFileStorageDir(
      self.op.disk_template, self.cfg, self.instance.name,
      file_storage_dir=self.op.file_storage_dir)
    new_disks = GenerateDiskTemplate(self,
                                     self.op.disk_template,
                                     self.instance.uuid,
                                     pnode_uuid,
                                     snode_uuid,
                                     self.disks_info,
                                     file_storage_dir,
                                     self.op.file_driver,
                                     0,
                                     feedback_fn,
                                     self.diskparams)

    # Create the new block devices for the instance.
    feedback_fn("Creating new empty disks of type '%s'..." % template_info)
    try:
      CreateDisks(self, self.instance, disk_template=self.op.disk_template,
                  disks=new_disks)
    except errors.OpExecError:
      self.LogWarning("Device creation failed")
      # give back any DRBD minors reserved for the new disks
      for disk in new_disks:
        self.cfg.ReleaseDRBDMinors(disk.uuid)
      raise

    # Transfer the data from the old to the newly created disks of the instance.
    feedback_fn("Populating the new empty disks of type '%s'..." %
                template_info)
    for idx, (old, new) in enumerate(zip(old_disks, new_disks)):
      feedback_fn(" - copying data from disk %s (%s), size %s" %
                  (idx, old.dev_type,
                   utils.FormatUnit(new.size, "h")))
      if old.dev_type == constants.DT_DRBD8:
        # copy from the underlying data child, not the DRBD device itself
        old = old.children[0]
      result = self.rpc.call_blockdev_convert(pnode_uuid, (old, self.instance),
                                              (new, self.instance))
      msg = result.fail_msg
      if msg:
        # A disk failed to copy. Abort the conversion operation and rollback
        # the modifications to the previous state. The instance will remain
        # intact.
        if self.op.disk_template == constants.DT_DRBD8:
          new = new.children[0]
        self.Log(" - ERROR: Could not copy disk '%s' to '%s'" %
                 (old.logical_id[1], new.logical_id[1]))
        try:
          self.LogInfo("Some disks failed to copy")
          self.LogInfo("The instance will not be affected, aborting operation")
          self.LogInfo("Removing newly created disks of type '%s'..." %
                       template_info)
          RemoveDisks(self, self.instance, disks=new_disks)
          self.LogInfo("Newly created disks removed successfully")
        finally:
          # always release the reserved minors, even if removal failed
          for disk in new_disks:
            self.cfg.ReleaseDRBDMinors(disk.uuid)
        result.Raise("Error while converting the instance's template")

    # In case of DRBD disk, return its port to the pool
    for disk in old_disks:
      if disk.dev_type == constants.DT_DRBD8:
        tcp_port = disk.logical_id[2]
        self.cfg.AddTcpUdpPort(tcp_port)

    # Remove old disks from the instance.
    feedback_fn("Detaching old disks (%s) from the instance and removing"
                " them from cluster config" % old_template)
    for old_disk in old_disks:
      self.cfg.RemoveInstanceDisk(self.instance.uuid, old_disk.uuid)

    # Attach the new disks to the instance.
    feedback_fn("Adding new disks (%s) to cluster config and attaching"
                " them to the instance" % template_info)
    for (idx, new_disk) in enumerate(new_disks):
      self.cfg.AddInstanceDisk(self.instance.uuid, new_disk, idx=idx)

    # Re-read the instance from the configuration.
    self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)

    # Release node locks while waiting for sync and disks removal.
    ReleaseLocks(self, locking.LEVEL_NODE)

    disk_abort = not WaitForSync(self, self.instance,
                                 oneshot=not self.op.wait_for_sync)
    if disk_abort:
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance, please cleanup manually")

    feedback_fn("Removing old block devices of type '%s'..." % old_template)
    RemoveDisks(self, self.instance, disks=old_disks)

    # Node resource locks will be released by the caller.
1402
  def _ConvertPlainToDrbd(self, feedback_fn):
    """Converts an instance from plain to drbd.

    The original LVs are renamed to become the data children of the new
    DRBD disks, so no data copy is needed; the missing meta and
    secondary-node volumes are created and the DRBD devices are then
    initialized on top (rolling the renames back on failure).

    @type feedback_fn: callable
    @param feedback_fn: function used to send feedback back to the caller

    """
    feedback_fn("Converting disk template from 'plain' to 'drbd'")

    pnode_uuid = self.instance.primary_node
    snode_uuid = self.op.remote_node_uuid
    old_disks = self.cfg.GetInstanceDisks(self.instance.uuid)

    assert utils.AnyDiskOfType(old_disks, [constants.DT_PLAIN])

    new_disks = GenerateDiskTemplate(self, self.op.disk_template,
                                     self.instance.uuid, pnode_uuid,
                                     [snode_uuid], self.disks_info,
                                     None, None, 0,
                                     feedback_fn, self.diskparams)
    anno_disks = rpc.AnnotateDiskParams(new_disks, self.diskparams)
    p_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, pnode_uuid)
    s_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, snode_uuid)
    info = GetInstanceInfoText(self.instance)
    feedback_fn("Creating additional volumes...")
    # first, create the missing data and meta devices
    for disk in anno_disks:
      # unfortunately this is... not too nice
      # on the primary only the meta device (children[1]) is missing --
      # the data device is the existing LV; the secondary needs both
      CreateSingleBlockDev(self, pnode_uuid, self.instance, disk.children[1],
                           info, True, p_excl_stor)
      for child in disk.children:
        CreateSingleBlockDev(self, snode_uuid, self.instance, child, info, True,
                             s_excl_stor)
    # at this stage, all new LVs have been created, we can rename the
    # old ones
    feedback_fn("Renaming original volumes...")
    rename_list = [(o, n.children[0].logical_id)
                   for (o, n) in zip(old_disks, new_disks)]
    result = self.rpc.call_blockdev_rename(pnode_uuid, rename_list)
    result.Raise("Failed to rename original LVs")

    feedback_fn("Initializing DRBD devices...")
    # all child devices are in place, we can now create the DRBD devices
    try:
      for disk in anno_disks:
        for (node_uuid, excl_stor) in [(pnode_uuid, p_excl_stor),
                                       (snode_uuid, s_excl_stor)]:
          f_create = node_uuid == pnode_uuid
          CreateSingleBlockDev(self, node_uuid, self.instance, disk, info,
                               f_create, excl_stor)
    except errors.GenericError, e:
      # rollback: restore the original LV names so the instance stays
      # usable with its old plain disks
      feedback_fn("Initializing of DRBD devices failed;"
                  " renaming back original volumes...")
      rename_back_list = [(n.children[0], o.logical_id)
                          for (n, o) in zip(new_disks, old_disks)]
      result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list)
      result.Raise("Failed to rename LVs back after error %s" % str(e))
      raise

    # Remove the old disks from the instance
    for old_disk in old_disks:
      self.cfg.RemoveInstanceDisk(self.instance.uuid, old_disk.uuid)

    # Attach the new disks to the instance
    for (idx, new_disk) in enumerate(new_disks):
      self.cfg.AddInstanceDisk(self.instance.uuid, new_disk, idx=idx)

    # re-read the instance from the configuration
    self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)

    # Release node locks while waiting for sync
    ReleaseLocks(self, locking.LEVEL_NODE)

    # disks are created, waiting for sync
    disk_abort = not WaitForSync(self, self.instance,
                                 oneshot=not self.op.wait_for_sync)
    if disk_abort:
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance, please cleanup manually")

    # Node resource locks will be released by caller
1481
1482 def _ConvertDrbdToPlain(self, feedback_fn):
1483 """Converts an instance from drbd to plain.
1484
1485 """
1486 secondary_nodes = self.cfg.GetInstanceSecondaryNodes(self.instance.uuid)
1487 disks = self.cfg.GetInstanceDisks(self.instance.uuid)
1488 assert len(secondary_nodes) == 1
1489 assert utils.AnyDiskOfType(disks, [constants.DT_DRBD8])
1490
1491 feedback_fn("Converting disk template from 'drbd' to 'plain'")
1492
1493 old_disks = AnnotateDiskParams(self.instance, disks, self.cfg)
1494 new_disks = [d.children[0] for d in disks]
1495
1496 # copy over size, mode and name and set the correct nodes
1497 for parent, child in zip(old_disks, new_disks):
1498 child.size = parent.size
1499 child.mode = parent.mode
1500 child.name = parent.name
1501 child.nodes = [self.instance.primary_node]
1502
1503 # this is a DRBD disk, return its port to the pool
1504 for disk in old_disks:
1505 tcp_port = disk.logical_id[2]
1506 self.cfg.AddTcpUdpPort(tcp_port)
1507
1508 # Remove the old disks from the instance
1509 for old_disk in old_disks:
1510 self.cfg.RemoveInstanceDisk(self.instance.uuid, old_disk.uuid)
1511
1512 # Attach the new disks to the instance
1513 for (idx, new_disk) in enumerate(new_disks):
1514 self.cfg.AddInstanceDisk(self.instance.uuid, new_disk, idx=idx)
1515
1516 # re-read the instance from the configuration
1517 self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
1518
1519 # Release locks in case removing disks takes a while
1520 ReleaseLocks(self, locking.LEVEL_NODE)
1521
1522 feedback_fn("Removing volumes on the secondary node...")
1523 RemoveDisks(self, self.instance, disks=old_disks,
1524 target_node_uuid=secondary_nodes[0])
1525
1526 feedback_fn("Removing unneeded volumes on the primary node...")
1527 meta_disks = []
1528 for idx, disk in enumerate(old_disks):
1529 meta_disks.append(disk.children[1])
1530 RemoveDisks(self, self.instance, disks=meta_disks)
1531
1532 def _HotplugDevice(self, action, dev_type, device, extra, seq):
1533 self.LogInfo("Trying to hotplug device...")
1534 msg = "hotplug:"
1535 result = self.rpc.call_hotplug_device(self.instance.primary_node,
1536 self.instance, action, dev_type,
1537 (device, self.instance),
1538 extra, seq)
1539 if result.fail_msg:
1540 self.LogWarning("Could not hotplug device: %s" % result.fail_msg)
1541 self.LogInfo("Continuing execution..")
1542 msg += "failed"
1543 else:
1544 self.LogInfo("Hotplug done.")
1545 msg += "done"
1546 return msg
1547
1548 def _FillFileDriver(self):
1549 if not self.op.file_driver:
1550 self.op.file_driver = constants.FD_DEFAULT
1551 elif self.op.file_driver not in constants.FILE_DRIVER:
1552 raise errors.OpPrereqError("Invalid file driver name '%s'" %
1553 self.op.file_driver, errors.ECODE_INVAL)
1554
1555 def _GenerateDiskTemplateWrapper(self, idx, disk_type, params):
1556 file_path = CalculateFileStorageDir(
1557 disk_type, self.cfg, self.instance.name,
1558 file_storage_dir=self.op.file_storage_dir)
1559
1560 self._FillFileDriver()
1561
1562 secondary_nodes = self.cfg.GetInstanceSecondaryNodes(self.instance.uuid)
1563 return \
1564 GenerateDiskTemplate(self, disk_type, self.instance.uuid,
1565 self.instance.primary_node, secondary_nodes,
1566 [params], file_path, self.op.file_driver, idx,
1567 self.Log, self.diskparams)[0]
1568
  def _CreateNewDisk(self, idx, params, _):
    """Creates a new disk.

    Generates the disk object, creates the block devices, registers the
    disk in the configuration, optionally wipes it and hotplugs it.

    @param idx: index at which to add the disk
    @param params: creation parameters of the new disk

    @return: tuple of the new disk object and a list of changes for the
        opcode result

    """
    # add a new disk
    disk_template = self.cfg.GetInstanceDiskTemplate(self.instance.uuid)
    disk = self._GenerateDiskTemplateWrapper(idx, disk_template,
                                             params)
    new_disks = CreateDisks(self, self.instance, disks=[disk])
    self.cfg.AddInstanceDisk(self.instance.uuid, disk, idx)

    # re-read the instance from the configuration
    self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)

    if self.cluster.prealloc_wipe_disks:
      # Wipe new disk
      WipeOrCleanupDisks(self, self.instance,
                         disks=[(idx, disk, 0)],
                         cleanup=new_disks)

    changes = [
      ("disk/%d" % idx,
       "add:size=%s,mode=%s" % (disk.size, disk.mode)),
      ]
    if self.op.hotplug:
      # the disk must be assembled on the primary node before it can be
      # hotplugged into the running instance
      result = self.rpc.call_blockdev_assemble(self.instance.primary_node,
                                               (disk, self.instance),
                                               self.instance, True, idx)
      if result.fail_msg:
        changes.append(("disk/%d" % idx, "assemble:failed"))
        self.LogWarning("Can't assemble newly created disk %d: %s",
                        idx, result.fail_msg)
      else:
        _, link_name, uri = result.payload
        msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
                                  constants.HOTPLUG_TARGET_DISK,
                                  disk, (link_name, uri), idx)
        changes.append(("disk/%d" % idx, msg))

    return (disk, changes)
1609
1610 def _PostAddDisk(self, _, disk):
1611 if not WaitForSync(self, self.instance, disks=[disk],
1612 oneshot=not self.op.wait_for_sync):
1613 raise errors.OpExecError("Failed to sync disks of %s" %
1614 self.instance.name)
1615
1616 # the disk is active at this point, so deactivate it if the instance disks
1617 # are supposed to be inactive
1618 if not self.instance.disks_active:
1619 ShutdownInstanceDisks(self, self.instance, disks=[disk])
1620
1621 def _AttachDisk(self, idx, params, _):
1622 """Attaches an existing disk to an instance.
1623
1624 """
1625 uuid = params.get("uuid", None)
1626 name = params.get(constants.IDISK_NAME, None)
1627
1628 disk = self.GenericGetDiskInfo(uuid, name)
1629
1630 # Rename disk before attaching (if disk is filebased)
1631 if disk.dev_type in (constants.DTS_INSTANCE_DEPENDENT_PATH):
1632 # Add disk size/mode, else GenerateDiskTemplate will not work.
1633 params[constants.IDISK_SIZE] = disk.size
1634 params[constants.IDISK_MODE] = str(disk.mode)
1635 dummy_disk = self._GenerateDiskTemplateWrapper(idx, disk.dev_type, params)
1636 new_logical_id = dummy_disk.logical_id
1637 result = self.rpc.call_blockdev_rename(self.instance.primary_node,
1638 [(disk, new_logical_id)])
1639 result.Raise("Failed before attach")
1640 self.cfg.SetDiskLogicalID(disk.uuid, new_logical_id)
1641 disk.logical_id = new_logical_id
1642
1643 # Attach disk to instance
1644 self.cfg.AttachInstanceDisk(self.instance.uuid, disk.uuid, idx)
1645
1646 # re-read the instance from the configuration
1647 self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
1648
1649 changes = [
1650 ("disk/%d" % idx,
1651 "attach:size=%s,mode=%s" % (disk.size, disk.mode)),
1652 ]
1653
1654 disks_ok, _, payloads = AssembleInstanceDisks(self, self.instance,
1655 disks=[disk])
1656 if not disks_ok:
1657 changes.append(("disk/%d" % idx, "assemble:failed"))
1658 return disk, changes
1659
1660 if self.op.hotplug:
1661 _, link_name, uri = payloads[0]
1662 msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
1663 constants.HOTPLUG_TARGET_DISK,
1664 disk, (link_name, uri), idx)
1665 changes.append(("disk/%d" % idx, msg))
1666
1667 return (disk, changes)
1668
1669 def _ModifyDisk(self, idx, disk, params, _):
1670 """Modifies a disk.
1671
1672 """
1673 changes = []
1674 if constants.IDISK_MODE in params:
1675 disk.mode = params.get(constants.IDISK_MODE)
1676 changes.append(("disk.mode/%d" % idx, disk.mode))
1677
1678 if constants.IDISK_NAME in params:
1679 disk.name = params.get(constants.IDISK_NAME)
1680 changes.append(("disk.name/%d" % idx, disk.name))
1681
1682 # Modify arbitrary params in case instance template is ext
1683
1684 for key, value in params.iteritems():
1685 if (key not in constants.MODIFIABLE_IDISK_PARAMS and
1686 disk.dev_type == constants.DT_EXT):
1687 # stolen from GetUpdatedParams: default means reset/delete
1688 if value.lower() == constants.VALUE_DEFAULT:
1689 try:
1690 del disk.params[key]
1691 except KeyError:
1692 pass
1693 else:
1694 disk.params[key] = value
1695 changes.append(("disk.params:%s/%d" % (key, idx), value))
1696
1697 # Update disk object
1698 self.cfg.Update(disk, self.feedback_fn)
1699
1700 return changes
1701
1702 def _RemoveDisk(self, idx, root, _):
1703 """Removes a disk.
1704
1705 """
1706 hotmsg = ""
1707 if self.op.hotplug:
1708 hotmsg = self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
1709 constants.HOTPLUG_TARGET_DISK,
1710 root, None, idx)
1711 ShutdownInstanceDisks(self, self.instance, [root])
1712
1713 RemoveDisks(self, self.instance, disks=[root])
1714
1715 # if this is a DRBD disk, return its port to the pool
1716 if root.dev_type in constants.DTS_DRBD:
1717 self.cfg.AddTcpUdpPort(root.logical_id[2])
1718
1719 # Remove disk from config
1720 self.cfg.RemoveInstanceDisk(self.instance.uuid, root.uuid)
1721
1722 # re-read the instance from the configuration
1723 self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
1724
1725 return hotmsg
1726
1727 def _DetachDisk(self, idx, root, _):
1728 """Detaches a disk from an instance.
1729
1730 """
1731 hotmsg = ""
1732 if self.op.hotplug:
1733 hotmsg = self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
1734 constants.HOTPLUG_TARGET_DISK,
1735 root, None, idx)
1736
1737 # Always shutdown the disk before detaching.
1738 ShutdownInstanceDisks(self, self.instance, [root])
1739
1740 # Rename detached disk.
1741 #
1742 # Transform logical_id from:
1743 # <file_storage_dir>/<instance_name>/<disk_name>
1744 # to
1745 # <file_storage_dir>/<disk_name>
1746 if root.dev_type in (constants.DT_FILE, constants.DT_SHARED_FILE):
1747 file_driver = root.logical_id[0]
1748 instance_path, disk_name = os.path.split(root.logical_id[1])
1749 new_path = os.path.join(os.path.dirname(instance_path), disk_name)
1750 new_logical_id = (file_driver, new_path)
1751 result = self.rpc.call_blockdev_rename(self.instance.primary_node,
1752 [(root, new_logical_id)])
1753 result.Raise("Failed before detach")
1754 # Update logical_id
1755 self.cfg.SetDiskLogicalID(root.uuid, new_logical_id)
1756
1757 # Remove disk from config
1758 self.cfg.DetachInstanceDisk(self.instance.uuid, root.uuid)
1759
1760 # re-read the instance from the configuration
1761 self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
1762
1763 return hotmsg
1764
1765 def _CreateNewNic(self, idx, params, private):
1766 """Creates data structure for a new network interface.
1767
1768 """
1769 mac = params[constants.INIC_MAC]
1770 ip = params.get(constants.INIC_IP, None)
1771 net = params.get(constants.INIC_NETWORK, None)
1772 name = params.get(constants.INIC_NAME, None)
1773 net_uuid = self.cfg.LookupNetwork(net)
1774 #TODO: not private.filled?? can a nic have no nicparams??
1775 nicparams = private.filled
1776 nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
1777 nicparams=nicparams)
1778 nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
1779
1780 changes = [
1781 ("nic.%d" % idx,
1782 "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
1783 (mac, ip, private.filled[constants.NIC_MODE],
1784 private.filled[constants.NIC_LINK], net)),
1785 ]
1786
1787 if self.op.hotplug:
1788 msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
1789 constants.HOTPLUG_TARGET_NIC,
1790 nobj, None, idx)
1791 changes.append(("nic.%d" % idx, msg))
1792
1793 return (nobj, changes)
1794
1795 def _ApplyNicMods(self, idx, nic, params, private):
1796 """Modifies a network interface.
1797
1798 """
1799 changes = []
1800
1801 for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
1802 if key in params:
1803 changes.append(("nic.%s/%d" % (key, idx), params[key]))
1804 setattr(nic, key, params[key])
1805
1806 new_net = params.get(constants.INIC_NETWORK, nic.network)
1807 new_net_uuid = self.cfg.LookupNetwork(new_net)
1808 if new_net_uuid != nic.network:
1809 changes.append(("nic.network/%d" % idx, new_net))
1810 nic.network = new_net_uuid
1811
1812 if private.filled:
1813 nic.nicparams = private.filled
1814
1815 for (key, val) in nic.nicparams.items():
1816 changes.append(("nic.%s/%d" % (key, idx), val))
1817
1818 if self.op.hotplug:
1819 msg = self._HotplugDevice(constants.HOTPLUG_ACTION_MODIFY,
1820 constants.HOTPLUG_TARGET_NIC,
1821 nic, None, idx)
1822 changes.append(("nic/%d" % idx, msg))
1823
1824 return changes
1825
1826 def _RemoveNic(self, idx, nic, _):
1827 if self.op.hotplug:
1828 return self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
1829 constants.HOTPLUG_TARGET_NIC,
1830 nic, None, idx)
1831
  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.

    @param feedback_fn: callable used to report progress back to the user

    """
    self.feedback_fn = feedback_fn
    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there.
    # TODO: Replace with self.LogWarning
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

    # Node resource locks are held if and only if a disk template
    # conversion was requested
    assert ((self.op.disk_template is None) ^
            bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
      "Not owning any node resource locks"

    # Accumulates (field, value) descriptions of all applied changes
    result = []

    # New primary node
    if self.op.pnode_uuid:
      self.instance.primary_node = self.op.pnode_uuid

    # runtime memory
    if self.op.runtime_mem:
      rpcres = self.rpc.call_instance_balloon_memory(self.instance.primary_node,
                                                     self.instance,
                                                     self.op.runtime_mem)
      rpcres.Raise("Cannot modify instance runtime memory")
      result.append(("runtime_memory", self.op.runtime_mem))

    # Apply disk changes (add/attach/modify/remove/detach, as recorded in
    # self.diskmod during CheckArguments/CheckPrereq)
    inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
    ApplyContainerMods("disk", inst_disks, result, self.diskmod,
                       self._CreateNewDisk, self._AttachDisk, self._ModifyDisk,
                       self._RemoveDisk, self._DetachDisk,
                       post_add_fn=self._PostAddDisk)

    if self.op.disk_template:
      if __debug__:
        # Sanity check: all instance nodes (plus a possible new secondary)
        # must be locked at both levels before converting the disks
        check_nodes = set(self.cfg.GetInstanceNodes(self.instance.uuid))
        if self.op.remote_node_uuid:
          check_nodes.add(self.op.remote_node_uuid)
        for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
          owned = self.owned_locks(level)
          assert not (check_nodes - owned), \
            ("Not owning the correct locks, owning %r, expected at least %r" %
             (owned, check_nodes))

      r_shut = ShutdownInstanceDisks(self, self.instance)
      if not r_shut:
        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
                                 " proceed with disk template conversion")
      #TODO make heterogeneous conversions work
      mode = (self.cfg.GetInstanceDiskTemplate(self.instance.uuid),
              self.op.disk_template)
      try:
        # Use the dedicated conversion routine for this (old, new) template
        # pair if one exists, otherwise fall back to the generic path
        if mode in self._DISK_CONVERSIONS:
          self._DISK_CONVERSIONS[mode](self, feedback_fn)
        else:
          self._ConvertInstanceDisks(feedback_fn)
      except:
        # Give back any DRBD minors reserved for the conversion before
        # re-raising the original exception
        for disk in inst_disks:
          self.cfg.ReleaseDRBDMinors(disk.uuid)
        raise
      result.append(("disk_template", self.op.disk_template))

      disk_info = self.cfg.GetInstanceDisks(self.instance.uuid)
      assert utils.AllDiskOfType(disk_info, [self.op.disk_template]), \
        ("Expected disk template '%s', found '%s'" %
         (self.op.disk_template,
          self.cfg.GetInstanceDiskTemplate(self.instance.uuid)))

    # Release node and resource locks if there are any (they might already have
    # been released during disk conversion)
    ReleaseLocks(self, locking.LEVEL_NODE)
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    # Apply NIC changes
    if self._new_nics is not None:
      self.instance.nics = self._new_nics
      result.extend(self._nic_chgdesc)

    # hvparams changes
    if self.op.hvparams:
      self.instance.hvparams = self.hv_inst
      for key, val in self.op.hvparams.iteritems():
        result.append(("hv/%s" % key, val))

    # beparams changes
    if self.op.beparams:
      self.instance.beparams = self.be_inst
      for key, val in self.op.beparams.iteritems():
        result.append(("be/%s" % key, val))

    # OS change
    if self.op.os_name:
      self.instance.os = self.op.os_name

    # osparams changes
    if self.op.osparams:
      self.instance.osparams = self.os_inst
      for key, val in self.op.osparams.iteritems():
        result.append(("os/%s" % key, val))

    if self.op.osparams_private:
      self.instance.osparams_private = self.os_inst_private
      for key, val in self.op.osparams_private.iteritems():
        # Show the Private(...) blurb.
        result.append(("os_private/%s" % key, repr(val)))

    # Persist all accumulated in-memory changes to the configuration
    self.cfg.Update(self.instance, feedback_fn, self.proc.GetECId())

    if self.op.offline is None:
      # Ignore
      pass
    elif self.op.offline:
      # Mark instance as offline
      self.instance = self.cfg.MarkInstanceOffline(self.instance.uuid)
      result.append(("admin_state", constants.ADMINST_OFFLINE))
    else:
      # Mark instance as online, but stopped
      self.instance = self.cfg.MarkInstanceDown(self.instance.uuid)
      result.append(("admin_state", constants.ADMINST_DOWN))

    UpdateMetadata(feedback_fn, self.rpc, self.instance)

    assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
                self.owned_locks(locking.LEVEL_NODE)), \
      "All node locks should have been released by now"

    return result
1964
  # Maps an (old template, new template) pair to its dedicated conversion
  # routine; pairs not listed here go through the generic
  # _ConvertInstanceDisks path (see Exec)
  _DISK_CONVERSIONS = {
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
    }