Allow disk attachment with external storage
lib/cmdlib/instance_set_params.py

#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Logical unit setting parameters of a single instance."""

import copy
import logging
import os

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import hypervisor
from ganeti import locking
from ganeti import netutils
from ganeti import objects
from ganeti import utils
import ganeti.rpc.node as rpc

from ganeti.cmdlib.base import LogicalUnit

from ganeti.cmdlib.common import INSTANCE_DOWN, \
  INSTANCE_NOT_RUNNING, CheckNodeOnline, \
  CheckParamsNotGlobal, \
  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
  GetUpdatedParams, CheckInstanceState, ExpandNodeUuidAndName, \
  IsValidDiskAccessModeCombination, AnnotateDiskParams
from ganeti.cmdlib.instance_storage import CalculateFileStorageDir, \
  CheckDiskExtProvider, CheckNodesFreeDiskPerVG, CheckRADOSFreeSpace, \
  CheckSpindlesExclusiveStorage, ComputeDiskSizePerVG, ComputeDisksInfo, \
  CreateDisks, CreateSingleBlockDev, GenerateDiskTemplate, \
  IsExclusiveStorageEnabledNodeUuid, ShutdownInstanceDisks, \
  WaitForSync, WipeOrCleanupDisks, AssembleInstanceDisks
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
  NICToTuple, CheckNodeNotDrained, CopyLockList, \
  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
  UpdateMetadata, CheckForConflictingIp, \
  PrepareContainerMods, ComputeInstanceCommunicationNIC, \
  ApplyContainerMods, ComputeIPolicyInstanceSpecViolation, \
  CheckNodesPhysicalCPUs
import ganeti.masterd.instance


class InstNicModPrivate(object):
  """Data structure for network interface modifications.

  Used by L{LUInstanceSetParams}.

  """
  def __init__(self):
    self.params = None
    self.filled = None

class LUInstanceSetParams(LogicalUnit):
  """Modifies an instance's parameters.

  """
  HPATH = "instance-modify"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def GenericGetDiskInfo(self, uuid=None, name=None):
    """Find a disk object using the provided params.

    Accept arguments as keywords and use the GetDiskInfo/GetDiskInfoByName
    config functions to retrieve the disk info based on these arguments.

    In case of an error, raise the appropriate exceptions.
    """
    if uuid:
      disk = self.cfg.GetDiskInfo(uuid)
      if disk is None:
        raise errors.OpPrereqError("No disk was found with this UUID: %s" %
                                   uuid, errors.ECODE_INVAL)
    elif name:
      disk = self.cfg.GetDiskInfoByName(name)
      if disk is None:
        raise errors.OpPrereqError("No disk was found with this name: %s" %
                                   name, errors.ECODE_INVAL)
    else:
      raise errors.ProgrammerError("No disk UUID or name was given")

    return disk

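  # Illustrative use of GenericGetDiskInfo (a sketch; the identifiers are
  # made up and this snippet is not part of the original flow). Either
  # keyword selects the corresponding lookup path above:
  #
  #   disk = lu.GenericGetDiskInfo(uuid="d3d4-...")     # cfg.GetDiskInfo
  #   disk = lu.GenericGetDiskInfo(name="inst1-data")   # cfg.GetDiskInfoByName
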
  @staticmethod
  def _UpgradeDiskNicMods(kind, mods, verify_fn):
    assert ht.TList(mods)
    assert not mods or len(mods[0]) in (2, 3)

    if mods and len(mods[0]) == 2:
      result = []

      addremove = 0
      for op, params in mods:
        if op in (constants.DDM_ADD, constants.DDM_ATTACH,
                  constants.DDM_REMOVE, constants.DDM_DETACH):
          result.append((op, -1, params))
          addremove += 1

          if addremove > 1:
            raise errors.OpPrereqError("Only one %s add/attach/remove/detach "
                                       "operation is supported at a time" %
                                       kind, errors.ECODE_INVAL)
        else:
          result.append((constants.DDM_MODIFY, op, params))

      assert verify_fn(result)
    else:
      result = mods
    return result

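  # A sketch of the upgrade performed above: old-style two-element mods are
  # rewritten into the three-element (op, index, params) form, e.g.
  #
  #   [("add", {"size": 1024})]  ->  [("add", -1, {"size": 1024})]
  #   [(0, {"mode": "ro"})]      ->  [("modify", 0, {"mode": "ro"})]
  #
  # (the parameter values are made up for illustration).
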
  @staticmethod
  def _CheckMods(kind, mods, key_types, item_fn):
    """Ensures requested disk/NIC modifications are valid.

    Note that the 'attach' action needs a way to refer to the UUID of the disk,
    since the disk name is not unique cluster-wide. However, the UUID of the
    disk is not settable but rather generated by Ganeti automatically,
    therefore it cannot be passed as an IDISK parameter. For this reason, this
    function will override the checks to accept uuid parameters solely for the
    attach action.
    """
    # Create a key_types copy with the 'uuid' as a valid key type.
    key_types_attach = key_types.copy()
    key_types_attach['uuid'] = 'string'

    for (op, _, params) in mods:
      assert ht.TDict(params)

      # If 'key_types' is an empty dict, we assume we have an
      # 'ext' template and thus do not ForceDictType
      if key_types:
        utils.ForceDictType(params, (key_types if op != constants.DDM_ATTACH
                                     else key_types_attach))

      if op in (constants.DDM_REMOVE, constants.DDM_DETACH):
        if params:
          raise errors.OpPrereqError("No settings should be passed when"
                                     " removing or detaching a %s" % kind,
                                     errors.ECODE_INVAL)
      elif op in (constants.DDM_ADD, constants.DDM_ATTACH,
                  constants.DDM_MODIFY):
        item_fn(op, params)
      else:
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)

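  # For example (values made up), with a non-empty key_types these mods pass:
  #
  #   ("attach", -1, {"uuid": "0aff-..."})  # uuid accepted only for attach
  #   ("remove", -1, {})                    # remove/detach take no settings
  #
  # while ("add", -1, {"uuid": "..."}) would fail ForceDictType, since
  # 'uuid' is injected into the accepted keys solely for the attach op.
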
  def _VerifyDiskModification(self, op, params, excl_stor, group_access_types):
    """Verifies a disk modification.

    """
    disk_type = params.get(
      constants.IDISK_TYPE,
      self.cfg.GetInstanceDiskTemplate(self.instance.uuid))

    if op == constants.DDM_ADD:
      params[constants.IDISK_TYPE] = disk_type

      if disk_type == constants.DT_DISKLESS:
        raise errors.OpPrereqError(
          "Must specify disk type on diskless instance", errors.ECODE_INVAL)

      if disk_type != constants.DT_EXT:
        utils.ForceDictType(params, constants.IDISK_PARAMS_TYPES)

      mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
      if mode not in constants.DISK_ACCESS_SET:
        raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
                                   errors.ECODE_INVAL)

      size = params.get(constants.IDISK_SIZE, None)
      if size is None:
        raise errors.OpPrereqError("Required disk parameter '%s' missing" %
                                   constants.IDISK_SIZE, errors.ECODE_INVAL)
      size = int(size)

      params[constants.IDISK_SIZE] = size
      name = params.get(constants.IDISK_NAME, None)
      if name is not None and name.lower() == constants.VALUE_NONE:
        params[constants.IDISK_NAME] = None

    # This check is necessary both when adding and attaching disks
    if op in (constants.DDM_ADD, constants.DDM_ATTACH):
      CheckSpindlesExclusiveStorage(params, excl_stor, True)
      CheckDiskExtProvider(params, disk_type)

      # Make sure we do not add syncing disks to instances with inactive disks
      if not self.op.wait_for_sync and not self.instance.disks_active:
        raise errors.OpPrereqError("Can't %s a disk to an instance with"
                                   " deactivated disks and --no-wait-for-sync"
                                   " given" % op, errors.ECODE_INVAL)

      # Check disk access param (only for specific disks)
      if disk_type in constants.DTS_HAVE_ACCESS:
        access_type = params.get(constants.IDISK_ACCESS,
                                 group_access_types[disk_type])
        if not IsValidDiskAccessModeCombination(self.instance.hypervisor,
                                                disk_type, access_type):
          raise errors.OpPrereqError("Selected hypervisor (%s) cannot be"
                                     " used with %s disk access param" %
                                     (self.instance.hypervisor, access_type),
                                     errors.ECODE_STATE)

    if op == constants.DDM_ATTACH:
      if len(params) != 1 or ('uuid' not in params and
                              constants.IDISK_NAME not in params):
        raise errors.OpPrereqError("Only one argument is permitted in %s op,"
                                   " either %s or uuid" %
                                   (constants.DDM_ATTACH,
                                    constants.IDISK_NAME,
                                    ),
                                   errors.ECODE_INVAL)
      self._CheckAttachDisk(params)

    elif op == constants.DDM_MODIFY:
      if constants.IDISK_SIZE in params:
        raise errors.OpPrereqError("Disk size change not possible, use"
                                   " grow-disk", errors.ECODE_INVAL)

      disk_info = self.cfg.GetInstanceDisks(self.instance.uuid)

      # Disk modification supports changing only the disk name and mode.
      # Changing arbitrary parameters is allowed only for the ext disk template
      if not utils.AllDiskOfType(disk_info, [constants.DT_EXT]):
        utils.ForceDictType(params, constants.MODIFIABLE_IDISK_PARAMS_TYPES)
      else:
        # We have to check that the 'access' and 'disk_provider' parameters
        # cannot be modified
        for param in [constants.IDISK_ACCESS, constants.IDISK_PROVIDER]:
          if param in params:
            raise errors.OpPrereqError("Disk '%s' parameter change is"
                                       " not possible" % param,
                                       errors.ECODE_INVAL)

      name = params.get(constants.IDISK_NAME, None)
      if name is not None and name.lower() == constants.VALUE_NONE:
        params[constants.IDISK_NAME] = None

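  # Examples of disk mods that reach this verifier (values are made up):
  #
  #   ("add", -1, {"size": 1024, "mode": "rw"})   # full add checks above
  #   ("attach", -1, {"name": "inst1-extra"})     # exactly name or uuid
  #   ("modify", 0, {"mode": "ro"})               # name/mode for non-ext
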
  @staticmethod
  def _VerifyNicModification(op, params):
    """Verifies a network interface modification.

    """
    if op in (constants.DDM_ADD, constants.DDM_MODIFY):
      ip = params.get(constants.INIC_IP, None)
      name = params.get(constants.INIC_NAME, None)
      req_net = params.get(constants.INIC_NETWORK, None)
      link = params.get(constants.NIC_LINK, None)
      mode = params.get(constants.NIC_MODE, None)
      if name is not None and name.lower() == constants.VALUE_NONE:
        params[constants.INIC_NAME] = None
      if req_net is not None:
        if req_net.lower() == constants.VALUE_NONE:
          params[constants.INIC_NETWORK] = None
          req_net = None
        elif link is not None or mode is not None:
          raise errors.OpPrereqError("If a network is given, mode or link"
                                     " should not be set",
                                     errors.ECODE_INVAL)

      if op == constants.DDM_ADD:
        macaddr = params.get(constants.INIC_MAC, None)
        if macaddr is None:
          params[constants.INIC_MAC] = constants.VALUE_AUTO

      if ip is not None:
        if ip.lower() == constants.VALUE_NONE:
          params[constants.INIC_IP] = None
        else:
          if ip.lower() == constants.NIC_IP_POOL:
            if op == constants.DDM_ADD and req_net is None:
              raise errors.OpPrereqError("If ip=pool, parameter network"
                                         " cannot be none",
                                         errors.ECODE_INVAL)
          else:
            if not netutils.IPAddress.IsValid(ip):
              raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                         errors.ECODE_INVAL)

      if constants.INIC_MAC in params:
        macaddr = params[constants.INIC_MAC]
        if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
          macaddr = utils.NormalizeAndValidateMac(macaddr)

        if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
                                     " modifying an existing NIC",
                                     errors.ECODE_INVAL)

  def _LookupDiskIndex(self, idx):
    """Looks up uuid or name of disk if necessary."""
    try:
      return int(idx)
    except ValueError:
      pass
    for i, d in enumerate(self.cfg.GetInstanceDisks(self.instance.uuid)):
      if d.name == idx or d.uuid == idx:
        return i
    raise errors.OpPrereqError("Lookup of disk %r failed" % idx)

  def _LookupDiskMods(self):
    """Looks up uuid or name of disk if necessary."""
    return [(op, self._LookupDiskIndex(idx), params)
            for op, idx, params in self.op.disks]

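  # Illustration (made-up values): if the instance's second disk has the
  # name "data" and uuid "9a6f-...", all of these resolve to index 1:
  #
  #   self._LookupDiskIndex(1)           -> 1   # already an index
  #   self._LookupDiskIndex("data")      -> 1   # matched by name
  #   self._LookupDiskIndex("9a6f-...")  -> 1   # matched by uuid
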
  def CheckArguments(self):
    if not (self.op.nics or self.op.disks or self.op.disk_template or
            self.op.hvparams or self.op.beparams or self.op.os_name or
            self.op.osparams or self.op.offline is not None or
            self.op.runtime_mem or self.op.pnode or self.op.osparams_private or
            self.op.instance_communication is not None):
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)

    if self.op.hvparams:
      CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
                           "hypervisor", "instance", "cluster")

    self.op.disks = self._UpgradeDiskNicMods(
      "disk", self.op.disks,
      ht.TSetParamsMods(ht.TIDiskParams))
    self.op.nics = self._UpgradeDiskNicMods(
      "NIC", self.op.nics, ht.TSetParamsMods(ht.TINicParams))

    # Check disk template modifications
    if self.op.disk_template:
      if self.op.disks:
        raise errors.OpPrereqError("Disk template conversion and other disk"
                                   " changes not supported at the same time",
                                   errors.ECODE_INVAL)

      # mirrored template node checks
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        if not self.op.remote_node:
          raise errors.OpPrereqError("Changing the disk template to a mirrored"
                                     " one requires specifying a secondary"
                                     " node", errors.ECODE_INVAL)
      elif self.op.remote_node:
        self.LogWarning("Changing the disk template to a non-mirrored one,"
                        " the secondary node will be ignored")
        # the secondary node must be cleared in order to be ignored, otherwise
        # the operation will fail, in the GenerateDiskTemplate method
        self.op.remote_node = None

      # file-based template checks
      if self.op.disk_template in constants.DTS_FILEBASED:
        self._FillFileDriver()

    # Check NIC modifications
    self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
                    self._VerifyNicModification)

    if self.op.pnode:
      (self.op.pnode_uuid, self.op.pnode) = \
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)

  def _CheckAttachDisk(self, params):
    """Check if disk can be attached to an instance.

    Check if the disk and instance have the same template. Also, check if the
    disk nodes are visible from the instance.
    """
    uuid = params.get("uuid", None)
    name = params.get(constants.IDISK_NAME, None)

    disk = self.GenericGetDiskInfo(uuid, name)
    instance_template = self.cfg.GetInstanceDiskTemplate(self.instance.uuid)
    if (disk.dev_type != instance_template and
        instance_template != constants.DT_DISKLESS):
      raise errors.OpPrereqError("Instance has '%s' template while disk has"
                                 " '%s' template" %
                                 (instance_template, disk.dev_type),
                                 errors.ECODE_INVAL)

    instance_nodes = self.cfg.GetInstanceNodes(self.instance.uuid)
    # Make sure we do not attach disks to instances on wrong nodes. If the
    # instance is diskless, that instance is associated only to the primary
    # node, whereas the disk can be associated to two nodes in the case of
    # DRBD, hence, we have a subset check here.
    if disk.nodes and not set(instance_nodes).issubset(set(disk.nodes)):
      raise errors.OpPrereqError("Disk nodes are %s while the instance's nodes"
                                 " are %s" %
                                 (disk.nodes, instance_nodes),
                                 errors.ECODE_INVAL)

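  # Subset-check sketch (hypothetical node names): a DRBD disk with
  # disk.nodes == ["node1", "node2"] can be attached to an instance whose
  # nodes are ["node1"] or ["node1", "node2"], but not to one living on
  # ["node3"], since {"node3"} is not a subset of {"node1", "node2"}.
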
  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODEGROUP] = []
    # Can't even acquire node locks in shared mode as upcoming changes in
    # Ganeti 2.6 will start to modify the node object on disk conversion
    self.needed_locks[locking.LEVEL_NODE] = []
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
    # Lock the node group (shared) to be able to look up the ipolicy
    self.share_locks[locking.LEVEL_NODEGROUP] = 1
    self.dont_collate_locks[locking.LEVEL_NODEGROUP] = True
    self.dont_collate_locks[locking.LEVEL_NODE] = True
    self.dont_collate_locks[locking.LEVEL_NODE_RES] = True

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
      # Acquire locks for the instance's nodegroups optimistically. Needs
      # to be verified in CheckPrereq
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
    elif level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
      if self.op.disk_template and self.op.remote_node:
        (self.op.remote_node_uuid, self.op.remote_node) = \
          ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
                                self.op.remote_node)
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node_uuid)
    elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, primary and secondaries.

    """
    args = {}
    if constants.BE_MINMEM in self.be_new:
      args["minmem"] = self.be_new[constants.BE_MINMEM]
    if constants.BE_MAXMEM in self.be_new:
      args["maxmem"] = self.be_new[constants.BE_MAXMEM]
    if constants.BE_VCPUS in self.be_new:
      args["vcpus"] = self.be_new[constants.BE_VCPUS]
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
    # information at all.

    if self._new_nics is not None:
      nics = []

      for nic in self._new_nics:
        n = copy.deepcopy(nic)
        nicparams = self.cluster.SimpleFillNIC(n.nicparams)
        n.nicparams = nicparams
        nics.append(NICToTuple(self, n))

      args["nics"] = nics

    env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
    if self.op.disk_template:
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
    if self.op.runtime_mem:
      env["RUNTIME_MEMORY"] = self.op.runtime_mem

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + \
      list(self.cfg.GetInstanceNodes(self.instance.uuid))
    return (nl, nl)

  def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
                              old_params, cluster, pnode_uuid):

    update_params_dict = dict([(key, params[key])
                               for key in constants.NICS_PARAMETERS
                               if key in params])

    req_link = update_params_dict.get(constants.NIC_LINK, None)
    req_mode = update_params_dict.get(constants.NIC_MODE, None)

    new_net_uuid = None
    new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
    if new_net_uuid_or_name:
      new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
      new_net_obj = self.cfg.GetNetwork(new_net_uuid)

    if old_net_uuid:
      old_net_obj = self.cfg.GetNetwork(old_net_uuid)

    if new_net_uuid:
      netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode_uuid)
      if not netparams:
        raise errors.OpPrereqError("No netparams found for the network"
                                   " %s, probably not connected" %
                                   new_net_obj.name, errors.ECODE_INVAL)
      new_params = dict(netparams)
    else:
      new_params = GetUpdatedParams(old_params, update_params_dict)

    utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)

    new_filled_params = cluster.SimpleFillNIC(new_params)
    objects.NIC.CheckParameterSyntax(new_filled_params)

    new_mode = new_filled_params[constants.NIC_MODE]
    if new_mode == constants.NIC_MODE_BRIDGED:
      bridge = new_filled_params[constants.NIC_LINK]
      msg = self.rpc.call_bridges_exist(pnode_uuid, [bridge]).fail_msg
      if msg:
        msg = "Error checking bridges on node '%s': %s" % \
              (self.cfg.GetNodeName(pnode_uuid), msg)
        if self.op.force:
          self.warn.append(msg)
        else:
          raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)

    elif new_mode == constants.NIC_MODE_ROUTED:
      ip = params.get(constants.INIC_IP, old_ip)
      if ip is None and not new_net_uuid:
        raise errors.OpPrereqError("Cannot set the NIC IP address to None"
                                   " on a routed NIC if not attached to a"
                                   " network", errors.ECODE_INVAL)

    elif new_mode == constants.NIC_MODE_OVS:
      # TODO: check OVS link
      self.LogInfo("OVS links are currently not checked for correctness")

    if constants.INIC_MAC in params:
      mac = params[constants.INIC_MAC]
      if mac is None:
        raise errors.OpPrereqError("Cannot unset the NIC MAC address",
                                   errors.ECODE_INVAL)
      elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        # otherwise generate the MAC address
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
      else:
        # or validate/reserve the current one
        try:
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("MAC address '%s' already in use"
                                     " in cluster" % mac,
                                     errors.ECODE_NOTUNIQUE)
    elif new_net_uuid != old_net_uuid:

      def get_net_prefix(net_uuid):
        mac_prefix = None
        if net_uuid:
          nobj = self.cfg.GetNetwork(net_uuid)
          mac_prefix = nobj.mac_prefix

        return mac_prefix

      new_prefix = get_net_prefix(new_net_uuid)
      old_prefix = get_net_prefix(old_net_uuid)
      if old_prefix != new_prefix:
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())

    # if there is a change in (ip, network) tuple
    new_ip = params.get(constants.INIC_IP, old_ip)
    if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
      if new_ip:
        # if IP is pool then require a network and generate one IP
        if new_ip.lower() == constants.NIC_IP_POOL:
          if new_net_uuid:
            try:
              new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP"
                                         " from the address pool",
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s",
                         new_ip,
                         new_net_obj.name)
            params[constants.INIC_IP] = new_ip
          else:
            raise errors.OpPrereqError("ip=pool, but no network found",
                                       errors.ECODE_INVAL)
        # otherwise, reserve the new IP in the new network, if any
        elif new_net_uuid:
          try:
            self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId(),
                               check=self.op.conflicts_check)
            self.LogInfo("Reserving IP %s in network %s",
                         new_ip, new_net_obj.name)
          except errors.ReservationError:
            raise errors.OpPrereqError("IP %s not available in network %s" %
                                       (new_ip, new_net_obj.name),
                                       errors.ECODE_NOTUNIQUE)
        # new network is None so check if new IP is a conflicting IP
        elif self.op.conflicts_check:
          CheckForConflictingIp(self, new_ip, pnode_uuid)

      # release old IP if old network is not None
      if old_ip and old_net_uuid:
        try:
          self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
        except errors.AddressPoolError:
          logging.warning("Release IP %s not contained in network %s",
                          old_ip, old_net_obj.name)

    # there are no changes in (ip, network) tuple and old network is not None
    elif (old_net_uuid is not None and
          (req_link is not None or req_mode is not None)):
      raise errors.OpPrereqError("Not allowed to change link or mode of"
                                 " a NIC that is connected to a network",
                                 errors.ECODE_INVAL)

    private.params = new_params
    private.filled = new_filled_params

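  # NIC modification walk-through (all values made up): moving a NIC into a
  # network "net1" whose group netparams are {"mode": "bridged",
  # "link": "br1"} replaces the NIC's own mode/link with those netparams,
  # generates a new MAC if the networks' mac_prefix values differ, and with
  # ip="pool" draws a free address from net1's pool via cfg.GenerateIp.
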
  def _PreCheckDiskTemplate(self, pnode_info):
    """CheckPrereq checks related to a new disk template."""
    # Arguments are passed to avoid configuration lookups
    pnode_uuid = self.instance.primary_node

    # TODO make sure heterogeneous disk types can be converted.
    disk_template = self.cfg.GetInstanceDiskTemplate(self.instance.uuid)
    if disk_template == constants.DT_MIXED:
      raise errors.OpPrereqError(
        "Conversion from mixed is not yet supported.")

    inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
    if utils.AnyDiskOfType(inst_disks, constants.DTS_NOT_CONVERTIBLE_FROM):
      raise errors.OpPrereqError(
        "Conversion from the '%s' disk template is not supported"
        % self.cfg.GetInstanceDiskTemplate(self.instance.uuid),
        errors.ECODE_INVAL)

    elif self.op.disk_template in constants.DTS_NOT_CONVERTIBLE_TO:
      raise errors.OpPrereqError("Conversion to the '%s' disk template is"
                                 " not supported" % self.op.disk_template,
                                 errors.ECODE_INVAL)

    if (self.op.disk_template != constants.DT_EXT and
        utils.AllDiskOfType(inst_disks, [self.op.disk_template])):
      raise errors.OpPrereqError("Instance already has disk template %s" %
                                 self.op.disk_template, errors.ECODE_INVAL)

    if not self.cluster.IsDiskTemplateEnabled(self.op.disk_template):
      enabled_dts = utils.CommaJoin(self.cluster.enabled_disk_templates)
      raise errors.OpPrereqError("Disk template '%s' is not enabled for this"
                                 " cluster (enabled templates: %s)" %
                                 (self.op.disk_template, enabled_dts),
                                 errors.ECODE_STATE)

    default_vg = self.cfg.GetVGName()
    if (not default_vg and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Disk template conversions to lvm-based"
                                 " instances are not supported by the cluster",
                                 errors.ECODE_STATE)

    CheckInstanceState(self, self.instance, INSTANCE_DOWN,
                       msg="cannot change disk template")

    # compute new disks' information
    self.disks_info = ComputeDisksInfo(inst_disks, self.op.disk_template,
                                       default_vg, self.op.ext_params)

    # mirror node verification
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.remote_node_uuid == pnode_uuid:
        raise errors.OpPrereqError("Given new secondary node %s is the same"
                                   " as the primary node of the instance" %
                                   self.op.remote_node, errors.ECODE_STATE)
      CheckNodeOnline(self, self.op.remote_node_uuid)
      CheckNodeNotDrained(self, self.op.remote_node_uuid)
      CheckNodeVmCapable(self, self.op.remote_node_uuid)

      snode_info = self.cfg.GetNodeInfo(self.op.remote_node_uuid)
      snode_group = self.cfg.GetNodeGroup(snode_info.group)
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                              snode_group)
      CheckTargetNodeIPolicy(self, ipolicy, self.instance, snode_info, self.cfg,
                             ignore=self.op.ignore_ipolicy)
      if pnode_info.group != snode_info.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    # check that the template is in the primary node group's allowed templates
    pnode_group = self.cfg.GetNodeGroup(pnode_info.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                            pnode_group)
    allowed_dts = ipolicy[constants.IPOLICY_DTS]
    if self.op.disk_template not in allowed_dts:
708 raise errors.OpPrereqError("Disk template '%s' in not allowed (allowed"
709 " templates: %s)" % (self.op.disk_template,
710 utils.CommaJoin(allowed_dts)),
711 errors.ECODE_STATE)
712
713 if not self.op.disk_template in constants.DTS_EXCL_STORAGE:
714 # Make sure none of the nodes require exclusive storage
715 nodes = [pnode_info]
716 if self.op.disk_template in constants.DTS_INT_MIRROR:
717 assert snode_info
718 nodes.append(snode_info)
719 has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
720 if compat.any(map(has_es, nodes)):
721 errmsg = ("Cannot convert disk template from %s to %s when exclusive"
722 " storage is enabled" % (
723 self.cfg.GetInstanceDiskTemplate(self.instance.uuid),
724 self.op.disk_template))
725 raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)
726
727 # TODO remove setting the disk template after DiskSetParams exists.
728 # node capacity checks
729 if (self.op.disk_template == constants.DT_PLAIN and
730 utils.AllDiskOfType(inst_disks, [constants.DT_DRBD8])):
731 # we ensure that no capacity checks will be made for conversions from
732 # the 'drbd' to the 'plain' disk template
733 pass
734 elif (self.op.disk_template == constants.DT_DRBD8 and
735 utils.AllDiskOfType(inst_disks, [constants.DT_PLAIN])):
736 # for conversions from the 'plain' to the 'drbd' disk template, check
737 # only the remote node's capacity
738 req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks_info)
739 CheckNodesFreeDiskPerVG(self, [self.op.remote_node_uuid], req_sizes)
740 elif self.op.disk_template in constants.DTS_LVM:
741 # rest lvm-based capacity checks
742 node_uuids = [pnode_uuid]
743 if self.op.remote_node_uuid:
744 node_uuids.append(self.op.remote_node_uuid)
745 req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks_info)
746 CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes)
747 elif self.op.disk_template == constants.DT_RBD:
748 # CheckRADOSFreeSpace() is simply a placeholder
749 CheckRADOSFreeSpace()
750 elif self.op.disk_template == constants.DT_EXT:
751 # FIXME: Capacity checks for extstorage template, if exists
752 pass
753 else:
754 # FIXME: Checks about other non lvm-based disk templates
755 pass
756
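  # Typical trigger for these checks (hypothetical node name): converting an
  # instance's disks to a mirrored template from the command line, e.g.
  #
  #   gnt-instance modify -t drbd -n node2.example.com instance1
  #
  # which goes through CheckArguments and then this method in CheckPrereq.
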
  def _PreCheckDisks(self, ispec):
    """CheckPrereq checks related to disk changes.

    @type ispec: dict
    @param ispec: instance specs to be updated with the new disks

    """
    self.diskparams = self.cfg.GetInstanceDiskParams(self.instance)

    inst_nodes = self.cfg.GetInstanceNodes(self.instance.uuid)
    excl_stor = compat.any(
      rpc.GetExclusiveStorageForNodes(self.cfg, inst_nodes).values()
      )

    # Get the group access type
    node_info = self.cfg.GetNodeInfo(self.instance.primary_node)
    node_group = self.cfg.GetNodeGroup(node_info.group)
    group_disk_params = self.cfg.GetGroupDiskParams(node_group)

    group_access_types = dict(
      (dt, group_disk_params[dt].get(
        constants.RBD_ACCESS, constants.DISK_KERNELSPACE))
      for dt in constants.DISK_TEMPLATES)

    # Check disk modifications. This is done here and not in CheckArguments
    # (as with NICs), because we need to know the instance's disk template
    ver_fn = lambda op, par: self._VerifyDiskModification(op, par, excl_stor,
                                                          group_access_types)
    # Don't enforce param types here in case it's an ext disk added. The check
    # happens inside _VerifyDiskModification.
    self._CheckMods("disk", self.op.disks, {}, ver_fn)

    self.diskmod = PrepareContainerMods(self.op.disks, None)

    def _PrepareDiskMod(_, disk, params, __):
      disk.name = params.get(constants.IDISK_NAME, None)

    # Verify disk changes (operating on a copy)
    inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
    disks = copy.deepcopy(inst_disks)
    ApplyContainerMods("disk", disks, None, self.diskmod, None, None,
                       _PrepareDiskMod, None, None)
    utils.ValidateDeviceNames("disk", disks)
    if len(disks) > constants.MAX_DISKS:
      raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
                                 " more" % constants.MAX_DISKS,
                                 errors.ECODE_STATE)
    disk_sizes = [disk.size for disk in inst_disks]
    disk_sizes.extend(params["size"] for (op, idx, params, private) in
                      self.diskmod if op == constants.DDM_ADD)
    ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
    ispec[constants.ISPEC_DISK_SIZE] = disk_sizes

    # either --online or --offline was passed
    if self.op.offline is not None:
      if self.op.offline:
        msg = "can't change to offline without being down first"
      else:
        msg = "can't change to online (down) without being offline first"
      CheckInstanceState(self, self.instance, INSTANCE_NOT_RUNNING,
                         msg=msg)

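  # Rough shape of the data built above (illustrative values only):
  #
  #   group_access_types ~ {"rbd": "kernelspace", "ext": "kernelspace", ...}
  #   ispec ~ {ISPEC_DISK_COUNT: 2, ISPEC_DISK_SIZE: [1024, 2048]}
  #
  # the remaining ISPEC_* entries are filled in later in CheckPrereq.
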
  @staticmethod
  def _InstanceCommunicationDDM(cfg, instance_communication, instance):
    """Create a NIC mod that adds or removes the instance
    communication NIC to a running instance.

    The NICS are dynamically created using the Dynamic Device
    Modification (DDM). This function produces a NIC modification
    (mod) that inserts an additional NIC meant for instance
    communication in or removes an existing instance communication NIC
    from a running instance, using DDM.

    @type cfg: L{config.ConfigWriter}
    @param cfg: cluster configuration

    @type instance_communication: boolean
    @param instance_communication: whether instance communication is
                                   enabled or disabled

    @type instance: L{objects.Instance}
    @param instance: instance to which the NIC mod will be applied

    @rtype: (L{constants.DDM_ADD}, -1, parameters) or
            (L{constants.DDM_REMOVE}, -1, parameters) or
            L{None}
    @return: DDM mod containing an action to add or remove the NIC, or
             None if nothing needs to be done

    """
    nic_name = ComputeInstanceCommunicationNIC(instance.name)

    instance_communication_nic = None

    for nic in instance.nics:
      if nic.name == nic_name:
        instance_communication_nic = nic
        break

    if instance_communication and not instance_communication_nic:
      action = constants.DDM_ADD
      params = {constants.INIC_NAME: nic_name,
                constants.INIC_MAC: constants.VALUE_GENERATE,
                constants.INIC_IP: constants.NIC_IP_POOL,
                constants.INIC_NETWORK:
                  cfg.GetInstanceCommunicationNetwork()}
    elif not instance_communication and instance_communication_nic:
      action = constants.DDM_REMOVE
      params = None
    else:
      action = None
      params = None

    if action is not None:
      return (action, -1, params)
    else:
      return None

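  # Example outcome (a sketch; the NIC name depends on
  # ComputeInstanceCommunicationNIC and the values below are made up):
  # enabling communication on an instance that lacks the NIC yields
  #
  #   ("add", -1, {"name": <computed name>, "mac": "generate",
  #                "ip": "pool", "network": <communication network>})
  #
  # which is appended to self.op.nics and processed like any other NIC mod.
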
  def _GetInstanceInfo(self, cluster_hvparams):
    pnode_uuid = self.instance.primary_node
    instance_info = self.rpc.call_instance_info(
      pnode_uuid, self.instance.name, self.instance.hypervisor,
      cluster_hvparams)
    return instance_info

  def _CheckHotplug(self):
    if self.op.hotplug or self.op.hotplug_if_possible:
      result = self.rpc.call_hotplug_supported(self.instance.primary_node,
                                               self.instance)
      if result.fail_msg:
        if self.op.hotplug:
          result.Raise("Hotplug is not possible: %s" % result.fail_msg,
                       prereq=True, ecode=errors.ECODE_STATE)
        else:
          self.LogWarning(result.fail_msg)
          self.op.hotplug = False
          self.LogInfo("Modification will take place without hotplugging.")
      else:
        self.op.hotplug = True

  def _PrepareNicCommunication(self):
    # add or remove NIC for instance communication
    if self.op.instance_communication is not None:
      mod = self._InstanceCommunicationDDM(self.cfg,
                                           self.op.instance_communication,
                                           self.instance)
      if mod is not None:
        self.op.nics.append(mod)

    self.nicmod = PrepareContainerMods(self.op.nics, InstNicModPrivate)

  def _ProcessHVParams(self, node_uuids):
    if self.op.hvparams:
      hv_type = self.instance.hypervisor
      i_hvdict = GetUpdatedParams(self.instance.hvparams, self.op.hvparams)
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
      hv_new = self.cluster.SimpleFillHV(hv_type, self.instance.os, i_hvdict)

      # local check
      hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
      CheckHVParams(self, node_uuids, self.instance.hypervisor, hv_new)
      self.hv_proposed = self.hv_new = hv_new  # the new actual values
      self.hv_inst = i_hvdict  # the new dict (without defaults)
    else:
      self.hv_proposed = self.cluster.SimpleFillHV(self.instance.hypervisor,
                                                   self.instance.os,
                                                   self.instance.hvparams)
      self.hv_new = self.hv_inst = {}

  def _ProcessBeParams(self):
    if self.op.beparams:
      i_bedict = GetUpdatedParams(self.instance.beparams, self.op.beparams,
                                  use_none=True)
      objects.UpgradeBeParams(i_bedict)
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
      be_new = self.cluster.SimpleFillBE(i_bedict)
      self.be_proposed = self.be_new = be_new  # the new actual values
      self.be_inst = i_bedict  # the new dict (without defaults)
    else:
      self.be_new = self.be_inst = {}
      self.be_proposed = self.cluster.SimpleFillBE(self.instance.beparams)
    return self.cluster.FillBE(self.instance)

  def _ValidateCpuParams(self):
    # CPU param validation -- checking every time a parameter is
    # changed to cover all cases where either CPU mask or vcpus have
    # changed
    if (constants.BE_VCPUS in self.be_proposed and
        constants.HV_CPU_MASK in self.hv_proposed):
      cpu_list = \
        utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
      # Verify mask is consistent with number of vCPUs. Can skip this
      # test if only 1 entry in the CPU mask, which means same mask
      # is applied to all vCPUs.
      if (len(cpu_list) > 1 and
          len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
        raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
                                   " CPU mask [%s]" %
                                   (self.be_proposed[constants.BE_VCPUS],
                                    self.hv_proposed[constants.HV_CPU_MASK]),
                                   errors.ECODE_INVAL)

      # Only perform this test if a new CPU mask is given
      if constants.HV_CPU_MASK in self.hv_new and cpu_list:
        # Calculate the largest CPU number requested
        max_requested_cpu = max(map(max, cpu_list))
        # Check that all of the instance's nodes have enough physical CPUs to
        # satisfy the requested CPU mask
        hvspecs = [(self.instance.hypervisor,
                    self.cfg.GetClusterInfo()
                      .hvparams[self.instance.hypervisor])]
        CheckNodesPhysicalCPUs(self,
                               self.cfg.GetInstanceNodes(self.instance.uuid),
                               max_requested_cpu + 1,
                               hvspecs)

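  # Worked example of the mask check above (made-up values): with
  # cpu_mask="0-1:2-3" and vcpus=2, ParseMultiCpuMask yields
  # [[0, 1], [2, 3]], so len(cpu_list) == vcpus and the first check passes;
  # max_requested_cpu is then 3, so every instance node must expose at
  # least 4 physical CPUs.
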
  def _ProcessOsParams(self, node_uuids):
    # osparams processing
    instance_os = (self.op.os_name
                   if self.op.os_name and not self.op.force
                   else self.instance.os)

    if self.op.osparams or self.op.osparams_private:
      public_parms = self.op.osparams or {}
      private_parms = self.op.osparams_private or {}
      dupe_keys = utils.GetRepeatedKeys(public_parms, private_parms)

      if dupe_keys:
        raise errors.OpPrereqError("OS parameters repeated multiple times: %s" %
                                   utils.CommaJoin(dupe_keys))

      self.os_inst = GetUpdatedParams(self.instance.osparams,
                                      public_parms)
      self.os_inst_private = GetUpdatedParams(self.instance.osparams_private,
                                              private_parms)

      CheckOSParams(self, True, node_uuids, instance_os,
                    objects.FillDict(self.os_inst,
                                     self.os_inst_private),
                    self.op.force_variant)

    else:
      self.os_inst = {}
      self.os_inst_private = {}

  def _ProcessMem(self, cluster_hvparams, be_old, pnode_uuid):
    #TODO(dynmem): do the appropriate check involving MINMEM
    if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
        self.be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
      mem_check_list = [pnode_uuid]
      if self.be_new[constants.BE_AUTO_BALANCE]:
        # either we changed auto_balance to yes or it was from before
        mem_check_list.extend(
          self.cfg.GetInstanceSecondaryNodes(self.instance.uuid))
      instance_info = self._GetInstanceInfo(cluster_hvparams)
      hvspecs = [(self.instance.hypervisor,
                  cluster_hvparams)]
      nodeinfo = self.rpc.call_node_info(mem_check_list, None,
                                         hvspecs)
      pninfo = nodeinfo[pnode_uuid]
      msg = pninfo.fail_msg
      if msg:
        # Assume the primary node is unreachable and go ahead
        self.warn.append("Can't get info from primary node %s: %s" %
                         (self.cfg.GetNodeName(pnode_uuid), msg))
      else:
        (_, _, (pnhvinfo, )) = pninfo.payload
        if not isinstance(pnhvinfo.get("memory_free", None), int):
          self.warn.append("Node data from primary node %s doesn't contain"
                           " free memory information" %
                           self.cfg.GetNodeName(pnode_uuid))
        elif instance_info.fail_msg:
          self.warn.append("Can't get instance runtime information: %s" %
                           instance_info.fail_msg)
        else:
          if instance_info.payload:
            current_mem = int(instance_info.payload["memory"])
          else:
            # Assume instance not running
            # (there is a slight race condition here, but it's not very
            # probable, and we have no other way to check)
            # TODO: Describe race condition
            current_mem = 0
          #TODO(dynmem): do the appropriate check involving MINMEM
          miss_mem = (self.be_new[constants.BE_MAXMEM] - current_mem -
                      pnhvinfo["memory_free"])
          if miss_mem > 0:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from starting, due to %d MB of memory"
                                       " missing on its primary node" %
                                       miss_mem, errors.ECODE_NORES)

      if self.be_new[constants.BE_AUTO_BALANCE]:
        secondary_nodes = \
          self.cfg.GetInstanceSecondaryNodes(self.instance.uuid)
        for node_uuid, nres in nodeinfo.items():
          if node_uuid not in secondary_nodes:
            continue
          nres.Raise("Can't get info from secondary node %s" %
                     self.cfg.GetNodeName(node_uuid), prereq=True,
                     ecode=errors.ECODE_STATE)
          (_, _, (nhvinfo, )) = nres.payload
          if not isinstance(nhvinfo.get("memory_free", None), int):
            raise errors.OpPrereqError("Secondary node %s didn't return free"
                                       " memory information" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)
          #TODO(dynmem): do the appropriate check involving MINMEM
          elif self.be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from failover to its secondary node"
                                       " %s, due to not enough memory" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)

    if self.op.runtime_mem:
      remote_info = self.rpc.call_instance_info(
        self.instance.primary_node, self.instance.name,
        self.instance.hypervisor,
        cluster_hvparams)
      remote_info.Raise("Error checking node %s" %
                        self.cfg.GetNodeName(self.instance.primary_node),
                        prereq=True)
      if not remote_info.payload:  # not running already
        raise errors.OpPrereqError("Instance %s is not running" %
                                   self.instance.name, errors.ECODE_STATE)

      current_memory = remote_info.payload["memory"]
      if (not self.op.force and
          (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
           self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
        raise errors.OpPrereqError("Instance %s must have memory between %d"
                                   " and %d MB unless --force is given" %
                                   (self.instance.name,
                                    self.be_proposed[constants.BE_MINMEM],
                                    self.be_proposed[constants.BE_MAXMEM]),
                                   errors.ECODE_INVAL)

      delta = self.op.runtime_mem - current_memory
      if delta > 0:
        CheckNodeFreeMemory(
          self, self.instance.primary_node,
          "ballooning memory for instance %s" % self.instance.name, delta,
          self.instance.hypervisor,
          self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])

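  # Worked example of the miss_mem arithmetic above (made-up numbers):
  # raising maxmem to 4096 MB while the instance currently uses 1024 MB
  # and the primary node reports memory_free=2048 MB gives
  # miss_mem = 4096 - 1024 - 2048 = 1024 > 0, so the change is refused
  # with ECODE_NORES.
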
  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    self.cluster = self.cfg.GetClusterInfo()
    cluster_hvparams = self.cluster.hvparams[self.instance.hypervisor]

    self.op.disks = self._LookupDiskMods()

    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    self.warn = []

    if (self.op.pnode_uuid is not None and
        self.op.pnode_uuid != self.instance.primary_node and
        not self.op.force):
      instance_info = self._GetInstanceInfo(cluster_hvparams)

      if instance_info.fail_msg:
        self.warn.append("Can't get instance runtime information: %s" %
                         instance_info.fail_msg)
      elif instance_info.payload:
        raise errors.OpPrereqError(
          "Instance is still running on %s" %
          self.cfg.GetNodeName(self.instance.primary_node),
          errors.ECODE_STATE)
    pnode_uuid = self.instance.primary_node
    assert pnode_uuid in self.owned_locks(locking.LEVEL_NODE)

    node_uuids = list(self.cfg.GetInstanceNodes(self.instance.uuid))
    pnode_info = self.cfg.GetNodeInfo(pnode_uuid)

    assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
    group_info = self.cfg.GetNodeGroup(pnode_info.group)

    # dictionary with instance information after the modification
    ispec = {}

    self._CheckHotplug()

    self._PrepareNicCommunication()

    # disks processing
    assert not (self.op.disk_template and self.op.disks), \
      "Can't modify disk template and apply disk changes at the same time"

    if self.op.disk_template:
      self._PreCheckDiskTemplate(pnode_info)

    self._PreCheckDisks(ispec)

    self._ProcessHVParams(node_uuids)
    be_old = self._ProcessBeParams()

    self._ValidateCpuParams()
    self._ProcessOsParams(node_uuids)
    self._ProcessMem(cluster_hvparams, be_old, pnode_uuid)

    # make self.cluster visible in the functions below
    cluster = self.cluster

    def _PrepareNicCreate(_, params, private):
      self._PrepareNicModification(params, private, None, None,
                                   {}, cluster, pnode_uuid)
      return (None, None)

    def _PrepareNicAttach(_, __, ___):
      raise errors.OpPrereqError("Attach operation is not supported for NICs",
                                 errors.ECODE_INVAL)

    def _PrepareNicMod(_, nic, params, private):
      self._PrepareNicModification(params, private, nic.ip, nic.network,
                                   nic.nicparams, cluster, pnode_uuid)
      return None

    def _PrepareNicRemove(_, params, __):
      ip = params.ip
      net = params.network
      if net is not None and ip is not None:
        self.cfg.ReleaseIp(net, ip, self.proc.GetECId())

    def _PrepareNicDetach(_, __, ___):
      raise errors.OpPrereqError("Detach operation is not supported for NICs",
                                 errors.ECODE_INVAL)

    # Verify NIC changes (operating on copy)
    nics = [nic.Copy() for nic in self.instance.nics]
    ApplyContainerMods("NIC", nics, None, self.nicmod, _PrepareNicCreate,
                       _PrepareNicAttach, _PrepareNicMod, _PrepareNicRemove,
                       _PrepareNicDetach)
    if len(nics) > constants.MAX_NICS:
      raise errors.OpPrereqError("Instance has too many network interfaces"
                                 " (%d), cannot add more" % constants.MAX_NICS,
                                 errors.ECODE_STATE)

    # Pre-compute NIC changes (necessary to use result in hooks)
    self._nic_chgdesc = []
    if self.nicmod:
      # Operate on copies as this is still in prereq
      nics = [nic.Copy() for nic in self.instance.nics]
      ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
                         self._CreateNewNic, None, self._ApplyNicMods,
                         self._RemoveNic, None)
      # Verify that NIC names are unique and valid
      utils.ValidateDeviceNames("NIC", nics)
      self._new_nics = nics
      ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
    else:
      self._new_nics = None
      ispec[constants.ISPEC_NIC_COUNT] = len(self.instance.nics)

    if not self.op.ignore_ipolicy:
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                              group_info)

      # Fill ispec with backend parameters
      ispec[constants.ISPEC_SPINDLE_USE] = \
        self.be_new.get(constants.BE_SPINDLE_USE, None)
      ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
                                                         None)

      # Copy ispec to verify parameters with min/max values separately
      if self.op.disk_template:
        count = ispec[constants.ISPEC_DISK_COUNT]
        new_disk_types = [self.op.disk_template] * count
      else:
        old_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
        add_disk_count = ispec[constants.ISPEC_DISK_COUNT] - len(old_disks)
        dev_type = self.cfg.GetInstanceDiskTemplate(self.instance.uuid)
        if dev_type == constants.DT_DISKLESS and add_disk_count != 0:
          raise errors.ProgrammerError(
            "Conversion from diskless instance not possible and should have"
            " been caught")

        new_disk_types = ([d.dev_type for d in old_disks] +
                          [dev_type] * add_disk_count)
      ispec_max = ispec.copy()
      ispec_max[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MAXMEM, None)
      res_max = ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
                                                    new_disk_types)
      ispec_min = ispec.copy()
      ispec_min[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MINMEM, None)
      res_min = ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
                                                    new_disk_types)

      if (res_max or res_min):
        # FIXME: Improve error message by including information about whether
        # the upper or lower limit of the parameter fails the ipolicy.
        msg = ("Instance allocation to group %s (%s) violates policy: %s" %
               (group_info, group_info.name,
                utils.CommaJoin(set(res_max + res_min))))
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

  def _ConvertInstanceDisks(self, feedback_fn):
    """Converts the disks of an instance to another type.

    This function converts the disks of an instance. It supports
    conversions among all the available disk types except conversions
    between the LVM-based disk types, that use their separate code path.
    Also, this method does not support conversions that include the 'diskless'
    template and those targeting the 'blockdev' template.

    @type feedback_fn: callable
    @param feedback_fn: function used to send feedback back to the caller

    @rtype: NoneType
    @return: None
    @raise errors.OpPrereqError: in case of failure

    """
    template_info = self.op.disk_template
    if self.op.disk_template == constants.DT_EXT:
      template_info = ":".join([self.op.disk_template,
                                self.op.ext_params["provider"]])

    old_template = self.cfg.GetInstanceDiskTemplate(self.instance.uuid)
    feedback_fn("Converting disk template from '%s' to '%s'" %
                (old_template, template_info))

    assert not (old_template in constants.DTS_NOT_CONVERTIBLE_FROM or
                self.op.disk_template in constants.DTS_NOT_CONVERTIBLE_TO), \
      ("Unsupported disk template conversion from '%s' to '%s'" %
       (old_template, self.op.disk_template))

    pnode_uuid = self.instance.primary_node
    snode_uuid = []
    if self.op.remote_node_uuid:
      snode_uuid = [self.op.remote_node_uuid]

    old_disks = self.cfg.GetInstanceDisks(self.instance.uuid)

    feedback_fn("Generating new '%s' disk template..." % template_info)
    file_storage_dir = CalculateFileStorageDir(
      self.op.disk_template, self.cfg, self.instance.name,
      file_storage_dir=self.op.file_storage_dir)
    new_disks = GenerateDiskTemplate(self,
                                     self.op.disk_template,
                                     self.instance.uuid,
                                     pnode_uuid,
                                     snode_uuid,
                                     self.disks_info,
                                     file_storage_dir,
                                     self.op.file_driver,
                                     0,
                                     feedback_fn,
                                     self.diskparams)

    # Create the new block devices for the instance.
    feedback_fn("Creating new empty disks of type '%s'..." % template_info)
    try:
      CreateDisks(self, self.instance, disk_template=self.op.disk_template,
                  disks=new_disks)
    except errors.OpExecError:
      self.LogWarning("Device creation failed")
      for disk in new_disks:
        self.cfg.ReleaseDRBDMinors(disk.uuid)
      raise

    # Transfer the data from the old to the newly created disks of the
    # instance.
    feedback_fn("Populating the new empty disks of type '%s'..." %
                template_info)
    for idx, (old, new) in enumerate(zip(old_disks, new_disks)):
      feedback_fn(" - copying data from disk %s (%s), size %s" %
                  (idx, old.dev_type,
                   utils.FormatUnit(new.size, "h")))
      if old.dev_type == constants.DT_DRBD8:
        old = old.children[0]
      result = self.rpc.call_blockdev_convert(pnode_uuid, (old, self.instance),
                                              (new, self.instance))
      msg = result.fail_msg
      if msg:
        # A disk failed to copy. Abort the conversion operation and rollback
        # the modifications to the previous state. The instance will remain
        # intact.
        if self.op.disk_template == constants.DT_DRBD8:
          new = new.children[0]
        self.Log(" - ERROR: Could not copy disk '%s' to '%s'" %
                 (old.logical_id[1], new.logical_id[1]))
        try:
          self.LogInfo("Some disks failed to copy")
          self.LogInfo("The instance will not be affected, aborting operation")
          self.LogInfo("Removing newly created disks of type '%s'..." %
                       template_info)
          RemoveDisks(self, self.instance, disks=new_disks)
          self.LogInfo("Newly created disks removed successfully")
        finally:
          for disk in new_disks:
            self.cfg.ReleaseDRBDMinors(disk.uuid)
          result.Raise("Error while converting the instance's template")

    # In case of DRBD disk, return its port to the pool
    for disk in old_disks:
      if disk.dev_type == constants.DT_DRBD8:
        tcp_port = disk.logical_id[2]
        self.cfg.AddTcpUdpPort(tcp_port)

    # Remove old disks from the instance.
    feedback_fn("Detaching old disks (%s) from the instance and removing"
                " them from cluster config" % old_template)
    for old_disk in old_disks:
      self.cfg.RemoveInstanceDisk(self.instance.uuid, old_disk.uuid)

    # Attach the new disks to the instance.
    feedback_fn("Adding new disks (%s) to cluster config and attaching"
                " them to the instance" % template_info)
    for (idx, new_disk) in enumerate(new_disks):
      self.cfg.AddInstanceDisk(self.instance.uuid, new_disk, idx=idx)

    # Re-read the instance from the configuration.
    self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)

    # Release node locks while waiting for sync and disks removal.
    ReleaseLocks(self, locking.LEVEL_NODE)

    disk_abort = not WaitForSync(self, self.instance,
                                 oneshot=not self.op.wait_for_sync)
    if disk_abort:
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance, please cleanup manually")

    feedback_fn("Removing old block devices of type '%s'..." % old_template)
    RemoveDisks(self, self.instance, disks=old_disks)

    # Node resource locks will be released by the caller.

  def _ConvertPlainToDrbd(self, feedback_fn):
    """Converts an instance from plain to drbd.

    """
    feedback_fn("Converting disk template from 'plain' to 'drbd'")

    pnode_uuid = self.instance.primary_node
    snode_uuid = self.op.remote_node_uuid
    old_disks = self.cfg.GetInstanceDisks(self.instance.uuid)

    assert utils.AnyDiskOfType(old_disks, [constants.DT_PLAIN])

    new_disks = GenerateDiskTemplate(self, self.op.disk_template,
                                     self.instance.uuid, pnode_uuid,
                                     [snode_uuid], self.disks_info,
                                     None, None, 0,
                                     feedback_fn, self.diskparams)
    anno_disks = rpc.AnnotateDiskParams(new_disks, self.diskparams)
    p_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, pnode_uuid)
    s_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, snode_uuid)
    info = GetInstanceInfoText(self.instance)
    feedback_fn("Creating additional volumes...")
    # first, create the missing data and meta devices
    for disk in anno_disks:
      # unfortunately this is... not too nice
      CreateSingleBlockDev(self, pnode_uuid, self.instance, disk.children[1],
                           info, True, p_excl_stor)
      for child in disk.children:
        CreateSingleBlockDev(self, snode_uuid, self.instance, child, info, True,
                             s_excl_stor)
    # at this stage, all new LVs have been created, we can rename the
    # old ones
    feedback_fn("Renaming original volumes...")
    rename_list = [(o, n.children[0].logical_id)
                   for (o, n) in zip(old_disks, new_disks)]
    result = self.rpc.call_blockdev_rename(pnode_uuid, rename_list)
    result.Raise("Failed to rename original LVs")

    feedback_fn("Initializing DRBD devices...")
    # all child devices are in place, we can now create the DRBD devices
    try:
      for disk in anno_disks:
        for (node_uuid, excl_stor) in [(pnode_uuid, p_excl_stor),
                                       (snode_uuid, s_excl_stor)]:
          f_create = node_uuid == pnode_uuid
          CreateSingleBlockDev(self, node_uuid, self.instance, disk, info,
                               f_create, excl_stor)
    except errors.GenericError as e:
1444 feedback_fn("Initializing of DRBD devices failed;"
1445 " renaming back original volumes...")
1446 rename_back_list = [(n.children[0], o.logical_id)
1447 for (n, o) in zip(new_disks, old_disks)]
1448 result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list)
1449 result.Raise("Failed to rename LVs back after error %s" % str(e))
1450 raise
1451
1452 # Remove the old disks from the instance
1453 for old_disk in old_disks:
1454 self.cfg.RemoveInstanceDisk(self.instance.uuid, old_disk.uuid)
1455
1456 # Attach the new disks to the instance
1457 for (idx, new_disk) in enumerate(new_disks):
1458 self.cfg.AddInstanceDisk(self.instance.uuid, new_disk, idx=idx)
1459
1460 # re-read the instance from the configuration
1461 self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
1462
1463 # Release node locks while waiting for sync
1464 ReleaseLocks(self, locking.LEVEL_NODE)
1465
1466 # disks are created, waiting for sync
1467 disk_abort = not WaitForSync(self, self.instance,
1468 oneshot=not self.op.wait_for_sync)
1469 if disk_abort:
1470 raise errors.OpExecError("There are some degraded disks for"
1471 " this instance, please clean up manually")
1472
1473 # Node resource locks will be released by caller
1474
1475 def _ConvertDrbdToPlain(self, feedback_fn):
1476 """Converts an instance from drbd to plain.
1477
1478 """
1479 secondary_nodes = self.cfg.GetInstanceSecondaryNodes(self.instance.uuid)
1480 disks = self.cfg.GetInstanceDisks(self.instance.uuid)
1481 assert len(secondary_nodes) == 1
1482 assert utils.AnyDiskOfType(disks, [constants.DT_DRBD8])
1483
1484 feedback_fn("Converting disk template from 'drbd' to 'plain'")
1485
1486 old_disks = AnnotateDiskParams(self.instance, disks, self.cfg)
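# the data volume (child 0) of each DRBD disk becomes the new plain disk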
1487 new_disks = [d.children[0] for d in disks]
1488
1489 # copy over size, mode and name and set the correct nodes
1490 for parent, child in zip(old_disks, new_disks):
1491 child.size = parent.size
1492 child.mode = parent.mode
1493 child.name = parent.name
1494 child.nodes = [self.instance.primary_node]
1495
1496 # these are DRBD disks; return their TCP ports to the pool
1497 for disk in old_disks:
1498 tcp_port = disk.logical_id[2]
1499 self.cfg.AddTcpUdpPort(tcp_port)
1500
1501 # Remove the old disks from the instance
1502 for old_disk in old_disks:
1503 self.cfg.RemoveInstanceDisk(self.instance.uuid, old_disk.uuid)
1504
1505 # Attach the new disks to the instance
1506 for (idx, new_disk) in enumerate(new_disks):
1507 self.cfg.AddInstanceDisk(self.instance.uuid, new_disk, idx=idx)
1508
1509 # re-read the instance from the configuration
1510 self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
1511
1512 # Release locks in case removing disks takes a while
1513 ReleaseLocks(self, locking.LEVEL_NODE)
1514
1515 feedback_fn("Removing volumes on the secondary node...")
1516 RemoveDisks(self, self.instance, disks=old_disks,
1517 target_node_uuid=secondary_nodes[0])
1518
1519 feedback_fn("Removing unneeded volumes on the primary node...")
1520 meta_disks = []
1521 for disk in old_disks:
1522 meta_disks.append(disk.children[1])
1523 RemoveDisks(self, self.instance, disks=meta_disks)
1524
1525 def _HotplugDevice(self, action, dev_type, device, extra, seq):
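"""Issues the hotplug RPC for a single device.

A failure is logged as a warning but does not abort the operation; the
returned "hotplug:done"/"hotplug:failed" message ends up in the change
list.

"""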
1526 self.LogInfo("Trying to hotplug device...")
1527 msg = "hotplug:"
1528 result = self.rpc.call_hotplug_device(self.instance.primary_node,
1529 self.instance, action, dev_type,
1530 (device, self.instance),
1531 extra, seq)
1532 if result.fail_msg:
1533 self.LogWarning("Could not hotplug device: %s" % result.fail_msg)
1534 self.LogInfo("Continuing execution..")
1535 msg += "failed"
1536 else:
1537 self.LogInfo("Hotplug done.")
1538 msg += "done"
1539 return msg
1540
1541 def _FillFileDriver(self):
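"""Fills in a default file driver if none was given.

Raises L{errors.OpPrereqError} if the requested driver is unknown.

"""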
1542 if not self.op.file_driver:
1543 self.op.file_driver = constants.FD_DEFAULT
1544 elif self.op.file_driver not in constants.FILE_DRIVER:
1545 raise errors.OpPrereqError("Invalid file driver name '%s'" %
1546 self.op.file_driver, errors.ECODE_INVAL)
1547
1548 def _GenerateDiskTemplateWrapper(self, idx, disk_type, params):
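"""Generates the object for a single disk of the given type.

Computes the file storage path where applicable, fills in the file
driver and delegates to L{GenerateDiskTemplate}.

"""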
1549 file_path = CalculateFileStorageDir(
1550 disk_type, self.cfg, self.instance.name,
1551 file_storage_dir=self.op.file_storage_dir)
1552
1553 self._FillFileDriver()
1554
1555 secondary_nodes = self.cfg.GetInstanceSecondaryNodes(self.instance.uuid)
1556 return \
1557 GenerateDiskTemplate(self, disk_type, self.instance.uuid,
1558 self.instance.primary_node, secondary_nodes,
1559 [params], file_path, self.op.file_driver, idx,
1560 self.Log, self.diskparams)[0]
1561
1562 def _CreateNewDisk(self, idx, params, _):
1563 """Creates a new disk.
1564
1565 """
1566 # add a new disk
1567 disk_template = self.cfg.GetInstanceDiskTemplate(self.instance.uuid)
1568 disk = self._GenerateDiskTemplateWrapper(idx, disk_template,
1569 params)
1570 new_disks = CreateDisks(self, self.instance, disks=[disk])
1571 self.cfg.AddInstanceDisk(self.instance.uuid, disk, idx)
1572
1573 # re-read the instance from the configuration
1574 self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
1575
1576 if self.cluster.prealloc_wipe_disks:
1577 # Wipe new disk
1578 WipeOrCleanupDisks(self, self.instance,
1579 disks=[(idx, disk, 0)],
1580 cleanup=new_disks)
1581
1582 changes = [
1583 ("disk/%d" % idx,
1584 "add:size=%s,mode=%s" % (disk.size, disk.mode)),
1585 ]
1586 if self.op.hotplug:
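# the new disk must be assembled on the primary node before it can be
# hot-added to the running instance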
1587 result = self.rpc.call_blockdev_assemble(self.instance.primary_node,
1588 (disk, self.instance),
1589 self.instance, True, idx)
1590 if result.fail_msg:
1591 changes.append(("disk/%d" % idx, "assemble:failed"))
1592 self.LogWarning("Can't assemble newly created disk %d: %s",
1593 idx, result.fail_msg)
1594 else:
1595 _, link_name, uri = result.payload
1596 msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
1597 constants.HOTPLUG_TARGET_DISK,
1598 disk, (link_name, uri), idx)
1599 changes.append(("disk/%d" % idx, msg))
1600
1601 return (disk, changes)
1602
1603 def _PostAddDisk(self, _, disk):
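"""Waits for the newly added disk to sync.

If the instance's disks are meant to be inactive, the now-active disk
is shut down again afterwards.

"""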
1604 if not WaitForSync(self, self.instance, disks=[disk],
1605 oneshot=not self.op.wait_for_sync):
1606 raise errors.OpExecError("Failed to sync disks of %s" %
1607 self.instance.name)
1608
1609 # the disk is active at this point, so deactivate it if the instance disks
1610 # are supposed to be inactive
1611 if not self.instance.disks_active:
1612 ShutdownInstanceDisks(self, self.instance, disks=[disk])
1613
1614 def _AttachDisk(self, idx, params, _):
1615 """Attaches an existing disk to an instance.
1616
1617 """
1618 uuid = params.get("uuid", None)
1619 name = params.get(constants.IDISK_NAME, None)
1620
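# the disk to attach is identified either by its UUID or by its name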
1621 disk = self.GenericGetDiskInfo(uuid, name)
1622
1623 # Rename disk before attaching (if the disk is file-based)
1624 if disk.dev_type in constants.DTS_INSTANCE_DEPENDENT_PATH:
1625 # Add disk size/mode, else GenerateDiskTemplate will not work.
1626 params[constants.IDISK_SIZE] = disk.size
1627 params[constants.IDISK_MODE] = str(disk.mode)
1628 dummy_disk = self._GenerateDiskTemplateWrapper(idx, disk.dev_type, params)
1629 new_logical_id = dummy_disk.logical_id
1630 result = self.rpc.call_blockdev_rename(self.instance.primary_node,
1631 [(disk, new_logical_id)])
1632 result.Raise("Failed before attach")
1633 self.cfg.SetDiskLogicalID(disk.uuid, new_logical_id)
1634 disk.logical_id = new_logical_id
1635
1636 # Attach disk to instance
1637 self.cfg.AttachInstanceDisk(self.instance.uuid, disk.uuid, idx)
1638
1639 # re-read the instance from the configuration
1640 self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
1641
1642 changes = [
1643 ("disk/%d" % idx,
1644 "attach:size=%s,mode=%s" % (disk.size, disk.mode)),
1645 ]
1646
1647 disks_ok, _, payloads = AssembleInstanceDisks(self, self.instance,
1648 disks=[disk])
1649 if not disks_ok:
1650 changes.append(("disk/%d" % idx, "assemble:failed"))
1651 return disk, changes
1652
1653 if self.op.hotplug:
1654 _, link_name, uri = payloads[0]
1655 msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
1656 constants.HOTPLUG_TARGET_DISK,
1657 disk, (link_name, uri), idx)
1658 changes.append(("disk/%d" % idx, msg))
1659
1660 return (disk, changes)
1661
1662 def _ModifyDisk(self, idx, disk, params, _):
1663 """Modifies a disk.
1664
1665 """
1666 changes = []
1667 if constants.IDISK_MODE in params:
1668 disk.mode = params.get(constants.IDISK_MODE)
1669 changes.append(("disk.mode/%d" % idx, disk.mode))
1670
1671 if constants.IDISK_NAME in params:
1672 disk.name = params.get(constants.IDISK_NAME)
1673 changes.append(("disk.name/%d" % idx, disk.name))
1674
1675 # Modify arbitrary params in case instance template is ext
1676
1677 for key, value in params.iteritems():
1678 if (key not in constants.MODIFIABLE_IDISK_PARAMS and
1679 disk.dev_type == constants.DT_EXT):
1680 # stolen from GetUpdatedParams: default means reset/delete
1681 if value.lower() == constants.VALUE_DEFAULT:
1682 try:
1683 del disk.params[key]
1684 except KeyError:
1685 pass
1686 else:
1687 disk.params[key] = value
1688 changes.append(("disk.params:%s/%d" % (key, idx), value))
1689
1690 # Update disk object
1691 self.cfg.Update(disk, self.feedback_fn)
1692
1693 return changes
1694
1695 def _RemoveDisk(self, idx, root, _):
1696 """Removes a disk.
1697
1698 """
1699 hotmsg = ""
1700 if self.op.hotplug:
1701 hotmsg = self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
1702 constants.HOTPLUG_TARGET_DISK,
1703 root, None, idx)
1704 ShutdownInstanceDisks(self, self.instance, [root])
1705
1706 RemoveDisks(self, self.instance, disks=[root])
1707
1708 # if this is a DRBD disk, return its port to the pool
1709 if root.dev_type in constants.DTS_DRBD:
1710 self.cfg.AddTcpUdpPort(root.logical_id[2])
1711
1712 # Remove disk from config
1713 self.cfg.RemoveInstanceDisk(self.instance.uuid, root.uuid)
1714
1715 # re-read the instance from the configuration
1716 self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
1717
1718 return hotmsg
1719
1720 def _DetachDisk(self, idx, root, _):
1721 """Detaches a disk from an instance.
1722
1723 """
1724 hotmsg = ""
1725 if self.op.hotplug:
1726 hotmsg = self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
1727 constants.HOTPLUG_TARGET_DISK,
1728 root, None, idx)
1729
1730 # Always shutdown the disk before detaching.
1731 ShutdownInstanceDisks(self, self.instance, [root])
1732
1733 # Rename detached disk.
1734 #
1735 # Transform logical_id from:
1736 # <file_storage_dir>/<instance_name>/<disk_name>
1737 # to
1738 # <file_storage_dir>/<disk_name>
1739 if root.dev_type in (constants.DT_FILE, constants.DT_SHARED_FILE):
1740 file_driver = root.logical_id[0]
1741 instance_path, disk_name = os.path.split(root.logical_id[1])
1742 new_path = os.path.join(os.path.dirname(instance_path), disk_name)
1743 new_logical_id = (file_driver, new_path)
1744 result = self.rpc.call_blockdev_rename(self.instance.primary_node,
1745 [(root, new_logical_id)])
1746 result.Raise("Failed before detach")
1747 # Update logical_id
1748 self.cfg.SetDiskLogicalID(root.uuid, new_logical_id)
1749
1750 # Remove disk from config
1751 self.cfg.DetachInstanceDisk(self.instance.uuid, root.uuid)
1752
1753 # re-read the instance from the configuration
1754 self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
1755
1756 return hotmsg
1757
1758 def _CreateNewNic(self, idx, params, private):
1759 """Creates data structure for a new network interface.
1760
1761 """
1762 mac = params[constants.INIC_MAC]
1763 ip = params.get(constants.INIC_IP, None)
1764 net = params.get(constants.INIC_NETWORK, None)
1765 name = params.get(constants.INIC_NAME, None)
1766 net_uuid = self.cfg.LookupNetwork(net)
1767 # TODO: can private.filled be empty here, i.e. can a NIC have no nicparams?
1768 nicparams = private.filled
1769 nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
1770 nicparams=nicparams)
1771 nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
1772
1773 changes = [
1774 ("nic.%d" % idx,
1775 "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
1776 (mac, ip, private.filled[constants.NIC_MODE],
1777 private.filled[constants.NIC_LINK], net)),
1778 ]
1779
1780 if self.op.hotplug:
1781 msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
1782 constants.HOTPLUG_TARGET_NIC,
1783 nobj, None, idx)
1784 changes.append(("nic.%d" % idx, msg))
1785
1786 return (nobj, changes)
1787
1788 def _ApplyNicMods(self, idx, nic, params, private):
1789 """Modifies a network interface.
1790
1791 """
1792 changes = []
1793
1794 for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
1795 if key in params:
1796 changes.append(("nic.%s/%d" % (key, idx), params[key]))
1797 setattr(nic, key, params[key])
1798
1799 new_net = params.get(constants.INIC_NETWORK, nic.network)
1800 new_net_uuid = self.cfg.LookupNetwork(new_net)
1801 if new_net_uuid != nic.network:
1802 changes.append(("nic.network/%d" % idx, new_net))
1803 nic.network = new_net_uuid
1804
1805 if private.filled:
1806 nic.nicparams = private.filled
1807
1808 for (key, val) in nic.nicparams.items():
1809 changes.append(("nic.%s/%d" % (key, idx), val))
1810
1811 if self.op.hotplug:
1812 msg = self._HotplugDevice(constants.HOTPLUG_ACTION_MODIFY,
1813 constants.HOTPLUG_TARGET_NIC,
1814 nic, None, idx)
1815 changes.append(("nic/%d" % idx, msg))
1816
1817 return changes
1818
1819 def _RemoveNic(self, idx, nic, _):
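"""Hot-unplugs a NIC when hotplug is requested.

The configuration update itself is handled by the container
modification machinery (see L{ApplyContainerMods}).

"""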
1820 if self.op.hotplug:
1821 return self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
1822 constants.HOTPLUG_TARGET_NIC,
1823 nic, None, idx)
1824
1825 def Exec(self, feedback_fn):
1826 """Modifies an instance.
1827
1828 All parameters take effect only at the next restart of the instance.
1829
1830 """
1831 self.feedback_fn = feedback_fn
1832 # Process the warnings from CheckPrereq here, as no feedback_fn was
1833 # available there.
1834 # TODO: Replace with self.LogWarning
1835 for warn in self.warn:
1836 feedback_fn("WARNING: %s" % warn)
1837
1838 assert ((self.op.disk_template is None) ^
1839 bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
1840 "Node resource locks must be held iff the disk template is changed"
1841
1842 result = []
1843
1844 # New primary node
1845 if self.op.pnode_uuid:
1846 self.instance.primary_node = self.op.pnode_uuid
1847
1848 # runtime memory
1849 if self.op.runtime_mem:
1850 rpcres = self.rpc.call_instance_balloon_memory(self.instance.primary_node,
1851 self.instance,
1852 self.op.runtime_mem)
1853 rpcres.Raise("Cannot modify instance runtime memory")
1854 result.append(("runtime_memory", self.op.runtime_mem))
1855
1856 # Apply disk changes
1857 inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
1858 ApplyContainerMods("disk", inst_disks, result, self.diskmod,
1859 self._CreateNewDisk, self._AttachDisk, self._ModifyDisk,
1860 self._RemoveDisk, self._DetachDisk,
1861 post_add_fn=self._PostAddDisk)
1862
1863 if self.op.disk_template:
1864 if __debug__:
1865 check_nodes = set(self.cfg.GetInstanceNodes(self.instance.uuid))
1866 if self.op.remote_node_uuid:
1867 check_nodes.add(self.op.remote_node_uuid)
1868 for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
1869 owned = self.owned_locks(level)
1870 assert not (check_nodes - owned), \
1871 ("Not owning the correct locks, owning %r, expected at least %r" %
1872 (owned, check_nodes))
1873
1874 r_shut = ShutdownInstanceDisks(self, self.instance)
1875 if not r_shut:
1876 raise errors.OpExecError("Cannot shutdown instance disks, unable to"
1877 " proceed with disk template conversion")
1878 # TODO: make heterogeneous conversions work
1879 mode = (self.cfg.GetInstanceDiskTemplate(self.instance.uuid),
1880 self.op.disk_template)
1881 try:
1882 if mode in self._DISK_CONVERSIONS:
1883 self._DISK_CONVERSIONS[mode](self, feedback_fn)
1884 else:
1885 self._ConvertInstanceDisks(feedback_fn)
1886 except:
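# the conversion failed: roll back any DRBD minor reservations before
# re-raising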
1887 for disk in inst_disks:
1888 self.cfg.ReleaseDRBDMinors(disk.uuid)
1889 raise
1890 result.append(("disk_template", self.op.disk_template))
1891
1892 disk_info = self.cfg.GetInstanceDisks(self.instance.uuid)
1893 assert utils.AllDiskOfType(disk_info, [self.op.disk_template]), \
1894 ("Expected disk template '%s', found '%s'" %
1895 (self.op.disk_template,
1896 self.cfg.GetInstanceDiskTemplate(self.instance.uuid)))
1897
1898 # Release node and resource locks if there are any (they might already have
1899 # been released during disk conversion)
1900 ReleaseLocks(self, locking.LEVEL_NODE)
1901 ReleaseLocks(self, locking.LEVEL_NODE_RES)
1902
1903 # Apply NIC changes
1904 if self._new_nics is not None:
1905 self.instance.nics = self._new_nics
1906 result.extend(self._nic_chgdesc)
1907
1908 # hvparams changes
1909 if self.op.hvparams:
1910 self.instance.hvparams = self.hv_inst
1911 for key, val in self.op.hvparams.iteritems():
1912 result.append(("hv/%s" % key, val))
1913
1914 # beparams changes
1915 if self.op.beparams:
1916 self.instance.beparams = self.be_inst
1917 for key, val in self.op.beparams.iteritems():
1918 result.append(("be/%s" % key, val))
1919
1920 # OS change
1921 if self.op.os_name:
1922 self.instance.os = self.op.os_name
1923
1924 # osparams changes
1925 if self.op.osparams:
1926 self.instance.osparams = self.os_inst
1927 for key, val in self.op.osparams.iteritems():
1928 result.append(("os/%s" % key, val))
1929
1930 if self.op.osparams_private:
1931 self.instance.osparams_private = self.os_inst_private
1932 for key, val in self.op.osparams_private.iteritems():
1933 # Show the Private(...) blurb.
1934 result.append(("os_private/%s" % key, repr(val)))
1935
1936 self.cfg.Update(self.instance, feedback_fn, self.proc.GetECId())
1937
1938 if self.op.offline is None:
1939 # Ignore
1940 pass
1941 elif self.op.offline:
1942 # Mark instance as offline
1943 self.instance = self.cfg.MarkInstanceOffline(self.instance.uuid)
1944 result.append(("admin_state", constants.ADMINST_OFFLINE))
1945 else:
1946 # Mark instance as online, but stopped
1947 self.instance = self.cfg.MarkInstanceDown(self.instance.uuid)
1948 result.append(("admin_state", constants.ADMINST_DOWN))
1949
1950 UpdateMetadata(feedback_fn, self.rpc, self.instance)
1951
1952 assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
1953 self.owned_locks(locking.LEVEL_NODE)), \
1954 "All node locks should have been released by now"
1955
1956 return result
1957
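# maps (current template, requested template) pairs to the specialized
# conversion methods above; any other supported pair is handled by the
# generic _ConvertInstanceDisks path in Exec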
1958 _DISK_CONVERSIONS = {
1959 (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
1960 (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
1961 }