Allow disk attachment to diskless instances
lib/cmdlib/instance_set_params.py
#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Logical unit setting parameters of a single instance."""

import copy
import logging
import os

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import hypervisor
from ganeti import locking
from ganeti import netutils
from ganeti import objects
from ganeti import utils
import ganeti.rpc.node as rpc

from ganeti.cmdlib.base import LogicalUnit

from ganeti.cmdlib.common import INSTANCE_DOWN, \
  INSTANCE_NOT_RUNNING, CheckNodeOnline, \
  CheckParamsNotGlobal, \
  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
  GetUpdatedParams, CheckInstanceState, ExpandNodeUuidAndName, \
  IsValidDiskAccessModeCombination, AnnotateDiskParams
from ganeti.cmdlib.instance_storage import CalculateFileStorageDir, \
  CheckDiskExtProvider, CheckNodesFreeDiskPerVG, CheckRADOSFreeSpace, \
  CheckSpindlesExclusiveStorage, ComputeDiskSizePerVG, ComputeDisksInfo, \
  CreateDisks, CreateSingleBlockDev, GenerateDiskTemplate, \
  IsExclusiveStorageEnabledNodeUuid, ShutdownInstanceDisks, \
  WaitForSync, WipeOrCleanupDisks, AssembleInstanceDisks
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
  NICToTuple, CheckNodeNotDrained, CopyLockList, \
  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
  UpdateMetadata, CheckForConflictingIp, \
  PrepareContainerMods, ComputeInstanceCommunicationNIC, \
  ApplyContainerMods, ComputeIPolicyInstanceSpecViolation, \
  CheckNodesPhysicalCPUs
import ganeti.masterd.instance


class InstNicModPrivate(object):
  """Data structure for network interface modifications.

  Used by L{LUInstanceSetParams}.

  """
  def __init__(self):
    self.params = None
    self.filled = None


class LUInstanceSetParams(LogicalUnit):
  """Modifies an instance's parameters.

  """
  HPATH = "instance-modify"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def GenericGetDiskInfo(self, uuid=None, name=None):
    """Find a disk object using the provided params.

    Accept arguments as keywords and use the GetDiskInfo/GetDiskInfoByName
    config functions to retrieve the disk info based on these arguments.

    In case of an error, raise the appropriate exceptions.
    """
    if uuid:
      disk = self.cfg.GetDiskInfo(uuid)
      if disk is None:
        raise errors.OpPrereqError("No disk was found with this UUID: %s" %
                                   uuid, errors.ECODE_INVAL)
    elif name:
      disk = self.cfg.GetDiskInfoByName(name)
      if disk is None:
        raise errors.OpPrereqError("No disk was found with this name: %s" %
                                   name, errors.ECODE_INVAL)
    else:
      raise errors.ProgrammerError("No disk UUID or name was given")

    return disk

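  # Example usage, with hypothetical identifiers; the UUID takes precedence
  # when both keywords are given:
  #
  #   disk = self.GenericGetDiskInfo(uuid="d3c2fbc6-...")  # lookup by UUID
  #   disk = self.GenericGetDiskInfo(name="data-0")        # lookup by name
  #
  # A failed lookup raises OpPrereqError; calling it with neither argument is
  # a programming error.
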
  @staticmethod
  def _UpgradeDiskNicMods(kind, mods, verify_fn):
    assert ht.TList(mods)
    assert not mods or len(mods[0]) in (2, 3)

    if mods and len(mods[0]) == 2:
      result = []

      addremove = 0
      for op, params in mods:
        if op in (constants.DDM_ADD, constants.DDM_ATTACH,
                  constants.DDM_REMOVE, constants.DDM_DETACH):
          result.append((op, -1, params))
          addremove += 1

          if addremove > 1:
            raise errors.OpPrereqError("Only one %s add/attach/remove/detach "
                                       "operation is supported at a time" %
                                       kind, errors.ECODE_INVAL)
        else:
          result.append((constants.DDM_MODIFY, op, params))

      assert verify_fn(result)
    else:
      result = mods
    return result

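  # The upgrade above turns legacy 2-tuple mods into the 3-tuple
  # (op, index, params) form, e.g. (hypothetical values):
  #
  #   [(constants.DDM_ADD, {"size": 1024})]
  #     -> [(constants.DDM_ADD, -1, {"size": 1024})]
  #   [("2", {"mode": "ro"})]
  #     -> [(constants.DDM_MODIFY, "2", {"mode": "ro"})]
  #
  # where -1 means "no index given" and any op that is not an
  # add/attach/remove/detach is treated as the index of a modify.
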
  @staticmethod
  def _CheckMods(kind, mods, key_types, item_fn):
    """Ensures requested disk/NIC modifications are valid.

    Note that the 'attach' action needs a way to refer to the UUID of the disk,
    since the disk name is not unique cluster-wide. However, the UUID of the
    disk is not settable but rather generated by Ganeti automatically,
    therefore it cannot be passed as an IDISK parameter. For this reason, this
    function will override the checks to accept uuid parameters solely for the
    attach action.
    """
    # Create a key_types copy with the 'uuid' as a valid key type.
    key_types_attach = key_types.copy()
    key_types_attach['uuid'] = 'string'

    for (op, _, params) in mods:
      assert ht.TDict(params)

      # If 'key_types' is an empty dict, we assume we have an
      # 'ext' template and thus do not ForceDictType
      if key_types:
        utils.ForceDictType(params, (key_types if op != constants.DDM_ATTACH
                                     else key_types_attach))

      if op in (constants.DDM_REMOVE, constants.DDM_DETACH):
        if params:
          raise errors.OpPrereqError("No settings should be passed when"
                                     " removing or detaching a %s" % kind,
                                     errors.ECODE_INVAL)
      elif op in (constants.DDM_ADD, constants.DDM_ATTACH,
                  constants.DDM_MODIFY):
        item_fn(op, params)
      else:
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)

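  # For example, both of the following mods (hypothetical values) pass the
  # checks above; 'uuid' is accepted as a key only for the attach action:
  #
  #   (constants.DDM_ATTACH, -1, {"uuid": "d3c2fbc6-..."})
  #   (constants.DDM_MODIFY, 0, {constants.IDISK_MODE: constants.DISK_RDONLY})
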
  def _VerifyDiskModification(self, op, params, excl_stor, group_access_types):
    """Verifies a disk modification.

    """
    disk_type = params.get(
        constants.IDISK_TYPE,
        self.cfg.GetInstanceDiskTemplate(self.instance.uuid))

    if op == constants.DDM_ADD:
      params[constants.IDISK_TYPE] = disk_type

      if disk_type == constants.DT_DISKLESS:
        raise errors.OpPrereqError(
            "Must specify disk type on diskless instance", errors.ECODE_INVAL)

      if disk_type != constants.DT_EXT:
        utils.ForceDictType(params, constants.IDISK_PARAMS_TYPES)

      mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
      if mode not in constants.DISK_ACCESS_SET:
        raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
                                   errors.ECODE_INVAL)

      size = params.get(constants.IDISK_SIZE, None)
      if size is None:
        raise errors.OpPrereqError("Required disk parameter '%s' missing" %
                                   constants.IDISK_SIZE, errors.ECODE_INVAL)
      size = int(size)

      params[constants.IDISK_SIZE] = size
      name = params.get(constants.IDISK_NAME, None)
      if name is not None and name.lower() == constants.VALUE_NONE:
        params[constants.IDISK_NAME] = None

    # This check is necessary both when adding and attaching disks
    if op in (constants.DDM_ADD, constants.DDM_ATTACH):
      CheckSpindlesExclusiveStorage(params, excl_stor, True)
      CheckDiskExtProvider(params, disk_type)

      # Make sure we do not add syncing disks to instances with inactive disks
      if not self.op.wait_for_sync and not self.instance.disks_active:
        raise errors.OpPrereqError("Can't %s a disk to an instance with"
                                   " deactivated disks and --no-wait-for-sync"
                                   " given" % op, errors.ECODE_INVAL)

      # Check disk access param (only for specific disks)
      if disk_type in constants.DTS_HAVE_ACCESS:
        access_type = params.get(constants.IDISK_ACCESS,
                                 group_access_types[disk_type])
        if not IsValidDiskAccessModeCombination(self.instance.hypervisor,
                                                disk_type, access_type):
          raise errors.OpPrereqError("Selected hypervisor (%s) cannot be"
                                     " used with %s disk access param" %
                                     (self.instance.hypervisor, access_type),
                                     errors.ECODE_STATE)

    if op == constants.DDM_ATTACH:
      if len(params) != 1 or ('uuid' not in params and
                              constants.IDISK_NAME not in params):
        raise errors.OpPrereqError("Only one argument is permitted in %s op,"
                                   " either %s or uuid" %
                                   (constants.DDM_ATTACH,
                                    constants.IDISK_NAME),
                                   errors.ECODE_INVAL)
      self._CheckAttachDisk(params)

    elif op == constants.DDM_MODIFY:
      if constants.IDISK_SIZE in params:
        raise errors.OpPrereqError("Disk size change not possible, use"
                                   " grow-disk", errors.ECODE_INVAL)

      disk_info = self.cfg.GetInstanceDisks(self.instance.uuid)

      # Disk modification supports changing only the disk name and mode.
      # Changing arbitrary parameters is allowed only for the ext disk template
      if not utils.AllDiskOfType(disk_info, [constants.DT_EXT]):
        utils.ForceDictType(params, constants.MODIFIABLE_IDISK_PARAMS_TYPES)
      else:
        # We have to check that the 'access' and 'disk_provider' parameters
        # cannot be modified
        for param in [constants.IDISK_ACCESS, constants.IDISK_PROVIDER]:
          if param in params:
            raise errors.OpPrereqError("Disk '%s' parameter change is"
                                       " not possible" % param,
                                       errors.ECODE_INVAL)

      name = params.get(constants.IDISK_NAME, None)
      if name is not None and name.lower() == constants.VALUE_NONE:
        params[constants.IDISK_NAME] = None

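  # To illustrate the checks above with hypothetical values: an 'add' needs
  # at least a size (in MiB), with the mode defaulting to read-write,
  #
  #   {constants.IDISK_SIZE: 2048, constants.IDISK_MODE: constants.DISK_RDWR}
  #
  # while an 'attach' must carry exactly one key, either 'uuid' or
  # constants.IDISK_NAME, and a 'modify' may only touch the name and mode
  # (except for 'ext' disks, where arbitrary provider parameters may change).
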
  @staticmethod
  def _VerifyNicModification(op, params):
    """Verifies a network interface modification.

    """
    if op in (constants.DDM_ADD, constants.DDM_MODIFY):
      ip = params.get(constants.INIC_IP, None)
      name = params.get(constants.INIC_NAME, None)
      req_net = params.get(constants.INIC_NETWORK, None)
      link = params.get(constants.NIC_LINK, None)
      mode = params.get(constants.NIC_MODE, None)
      if name is not None and name.lower() == constants.VALUE_NONE:
        params[constants.INIC_NAME] = None
      if req_net is not None:
        if req_net.lower() == constants.VALUE_NONE:
          params[constants.INIC_NETWORK] = None
          req_net = None
        elif link is not None or mode is not None:
          raise errors.OpPrereqError("If a network is given, link or mode"
                                     " must not be set",
                                     errors.ECODE_INVAL)

      if op == constants.DDM_ADD:
        macaddr = params.get(constants.INIC_MAC, None)
        if macaddr is None:
          params[constants.INIC_MAC] = constants.VALUE_AUTO

      if ip is not None:
        if ip.lower() == constants.VALUE_NONE:
          params[constants.INIC_IP] = None
        else:
          if ip.lower() == constants.NIC_IP_POOL:
            if op == constants.DDM_ADD and req_net is None:
              raise errors.OpPrereqError("If ip=pool, parameter network"
                                         " cannot be none",
                                         errors.ECODE_INVAL)
          else:
            if not netutils.IPAddress.IsValid(ip):
              raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                         errors.ECODE_INVAL)

      if constants.INIC_MAC in params:
        macaddr = params[constants.INIC_MAC]
        if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
          macaddr = utils.NormalizeAndValidateMac(macaddr)

        if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
                                     " modifying an existing NIC",
                                     errors.ECODE_INVAL)

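  # For instance, a NIC 'add' such as the following (hypothetical network
  # name) passes the checks above; the MAC defaults to 'auto', and ip=pool is
  # only valid together with a network:
  #
  #   {constants.INIC_NETWORK: "mynet",
  #    constants.INIC_IP: constants.NIC_IP_POOL}
  #
  # Giving a network together with an explicit link or mode is rejected,
  # since those values come from the network's netparams.
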
  def _LookupDiskIndex(self, idx):
    """Looks up uuid or name of disk if necessary."""
    try:
      return int(idx)
    except ValueError:
      pass
    for i, d in enumerate(self.cfg.GetInstanceDisks(self.instance.uuid)):
      if d.name == idx or d.uuid == idx:
        return i
    raise errors.OpPrereqError("Lookup of disk %r failed" % idx)

  def _LookupDiskMods(self):
    """Looks up uuid or name of disk if necessary."""
    return [(op, self._LookupDiskIndex(idx), params)
            for op, idx, params in self.op.disks]

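  # The lookup above accepts an index, a disk name, or a disk UUID, and
  # always resolves to a positional index, e.g. (hypothetical disks):
  #
  #   self._LookupDiskIndex("1")        -> 1
  #   self._LookupDiskIndex("data-0")   -> index of the disk named "data-0"
  #   self._LookupDiskIndex(disk_uuid)  -> index of the disk with that UUID
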
  def CheckArguments(self):
    if not (self.op.nics or self.op.disks or self.op.disk_template or
            self.op.hvparams or self.op.beparams or self.op.os_name or
            self.op.osparams or self.op.offline is not None or
            self.op.runtime_mem or self.op.pnode or self.op.osparams_private or
            self.op.instance_communication is not None):
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)

    if self.op.hvparams:
      CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
                           "hypervisor", "instance", "cluster")

    self.op.disks = self._UpgradeDiskNicMods(
      "disk", self.op.disks,
      ht.TSetParamsMods(ht.TIDiskParams))
    self.op.nics = self._UpgradeDiskNicMods(
      "NIC", self.op.nics, ht.TSetParamsMods(ht.TINicParams))

    # Check disk template modifications
    if self.op.disk_template:
      if self.op.disks:
        raise errors.OpPrereqError("Disk template conversion and other disk"
                                   " changes not supported at the same time",
                                   errors.ECODE_INVAL)

      # mirrored template node checks
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        if not self.op.remote_node:
          raise errors.OpPrereqError("Changing the disk template to a mirrored"
                                     " one requires specifying a secondary"
                                     " node", errors.ECODE_INVAL)
      elif self.op.remote_node:
        self.LogWarning("Changing the disk template to a non-mirrored one,"
                        " the secondary node will be ignored")
        # the secondary node must be cleared in order to be ignored, otherwise
        # the operation will fail, in the GenerateDiskTemplate method
        self.op.remote_node = None

      # file-based template checks
      if self.op.disk_template in constants.DTS_FILEBASED:
        self._FillFileDriver()

    # Check NIC modifications
    self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
                    self._VerifyNicModification)

    if self.op.pnode:
      (self.op.pnode_uuid, self.op.pnode) = \
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)

  def _CheckAttachDisk(self, params):
    """Check if disk can be attached to an instance.

    Check if the disk and instance have the same template. Also, check if the
    disk nodes are visible from the instance.
    """
    uuid = params.get("uuid", None)
    name = params.get(constants.IDISK_NAME, None)

    disk = self.GenericGetDiskInfo(uuid, name)
    instance_template = self.cfg.GetInstanceDiskTemplate(self.instance.uuid)
    if (disk.dev_type != instance_template and
        instance_template != constants.DT_DISKLESS):
      raise errors.OpPrereqError("Instance has '%s' template while disk has"
                                 " '%s' template" %
                                 (instance_template, disk.dev_type),
                                 errors.ECODE_INVAL)

    instance_nodes = self.cfg.GetInstanceNodes(self.instance.uuid)
    if not set(instance_nodes).issubset(set(disk.nodes)):
      raise errors.OpPrereqError("Disk nodes are %s while the instance's nodes"
                                 " are %s" %
                                 (disk.nodes, instance_nodes),
                                 errors.ECODE_INVAL)

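  # Note that the template check above deliberately accepts any disk template
  # when the instance is still diskless; this is what allows attaching a disk
  # to a diskless instance. The node check still applies, so the accepted
  # case looks like:
  #
  #   instance_template == constants.DT_DISKLESS  # instance has no disks yet
  #   disk.dev_type == constants.DT_PLAIN         # e.g. a detached LVM disk
  #   set(instance_nodes) <= set(disk.nodes)      # disk visible on all nodes
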
  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODEGROUP] = []
    # Can't even acquire node locks in shared mode as upcoming changes in
    # Ganeti 2.6 will start to modify the node object on disk conversion
    self.needed_locks[locking.LEVEL_NODE] = []
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
    # The node group is only needed to look up the ipolicy, so lock it shared
    self.share_locks[locking.LEVEL_NODEGROUP] = 1
    self.dont_collate_locks[locking.LEVEL_NODEGROUP] = True
    self.dont_collate_locks[locking.LEVEL_NODE] = True
    self.dont_collate_locks[locking.LEVEL_NODE_RES] = True

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
      # Acquire locks for the instance's nodegroups optimistically. Needs
      # to be verified in CheckPrereq
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
    elif level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
      if self.op.disk_template and self.op.remote_node:
        (self.op.remote_node_uuid, self.op.remote_node) = \
          ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
                                self.op.remote_node)
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node_uuid)
    elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, primary and secondaries.

    """
    args = {}
    if constants.BE_MINMEM in self.be_new:
      args["minmem"] = self.be_new[constants.BE_MINMEM]
    if constants.BE_MAXMEM in self.be_new:
      args["maxmem"] = self.be_new[constants.BE_MAXMEM]
    if constants.BE_VCPUS in self.be_new:
      args["vcpus"] = self.be_new[constants.BE_VCPUS]
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
    # information at all.

    if self._new_nics is not None:
      nics = []

      for nic in self._new_nics:
        n = copy.deepcopy(nic)
        nicparams = self.cluster.SimpleFillNIC(n.nicparams)
        n.nicparams = nicparams
        nics.append(NICToTuple(self, n))

      args["nics"] = nics

    env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
    if self.op.disk_template:
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
    if self.op.runtime_mem:
      env["RUNTIME_MEMORY"] = self.op.runtime_mem

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + \
      list(self.cfg.GetInstanceNodes(self.instance.uuid))
    return (nl, nl)

  def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
                              old_params, cluster, pnode_uuid):

    update_params_dict = dict([(key, params[key])
                               for key in constants.NICS_PARAMETERS
                               if key in params])

    req_link = update_params_dict.get(constants.NIC_LINK, None)
    req_mode = update_params_dict.get(constants.NIC_MODE, None)

    new_net_uuid = None
    new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
    if new_net_uuid_or_name:
      new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
      new_net_obj = self.cfg.GetNetwork(new_net_uuid)

    if old_net_uuid:
      old_net_obj = self.cfg.GetNetwork(old_net_uuid)

    if new_net_uuid:
      netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode_uuid)
      if not netparams:
        raise errors.OpPrereqError("No netparams found for the network"
                                   " %s, probably not connected" %
                                   new_net_obj.name, errors.ECODE_INVAL)
      new_params = dict(netparams)
    else:
      new_params = GetUpdatedParams(old_params, update_params_dict)

    utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)

    new_filled_params = cluster.SimpleFillNIC(new_params)
    objects.NIC.CheckParameterSyntax(new_filled_params)

    new_mode = new_filled_params[constants.NIC_MODE]
    if new_mode == constants.NIC_MODE_BRIDGED:
      bridge = new_filled_params[constants.NIC_LINK]
      msg = self.rpc.call_bridges_exist(pnode_uuid, [bridge]).fail_msg
      if msg:
        msg = "Error checking bridges on node '%s': %s" % \
              (self.cfg.GetNodeName(pnode_uuid), msg)
        if self.op.force:
          self.warn.append(msg)
        else:
          raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)

    elif new_mode == constants.NIC_MODE_ROUTED:
      ip = params.get(constants.INIC_IP, old_ip)
      if ip is None and not new_net_uuid:
        raise errors.OpPrereqError("Cannot set the NIC IP address to None"
                                   " on a routed NIC if not attached to a"
                                   " network", errors.ECODE_INVAL)

    elif new_mode == constants.NIC_MODE_OVS:
      # TODO: check OVS link
      self.LogInfo("OVS links are currently not checked for correctness")

    if constants.INIC_MAC in params:
      mac = params[constants.INIC_MAC]
      if mac is None:
        raise errors.OpPrereqError("Cannot unset the NIC MAC address",
                                   errors.ECODE_INVAL)
      elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        # otherwise generate the MAC address
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
      else:
        # or validate/reserve the current one
        try:
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("MAC address '%s' already in use"
                                     " in cluster" % mac,
                                     errors.ECODE_NOTUNIQUE)
    elif new_net_uuid != old_net_uuid:

      def get_net_prefix(net_uuid):
        mac_prefix = None
        if net_uuid:
          nobj = self.cfg.GetNetwork(net_uuid)
          mac_prefix = nobj.mac_prefix

        return mac_prefix

      new_prefix = get_net_prefix(new_net_uuid)
      old_prefix = get_net_prefix(old_net_uuid)
      if old_prefix != new_prefix:
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())

    # if there is a change in (ip, network) tuple
    new_ip = params.get(constants.INIC_IP, old_ip)
    if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
      if new_ip:
        # if IP is pool then require a network and generate one IP
        if new_ip.lower() == constants.NIC_IP_POOL:
          if new_net_uuid:
            try:
              new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP"
                                         " from the address pool",
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s",
                         new_ip,
                         new_net_obj.name)
            params[constants.INIC_IP] = new_ip
          else:
            raise errors.OpPrereqError("ip=pool, but no network found",
                                       errors.ECODE_INVAL)
        # Reserve new IP if in the new network if any
        elif new_net_uuid:
          try:
            self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId(),
                               check=self.op.conflicts_check)
            self.LogInfo("Reserving IP %s in network %s",
                         new_ip, new_net_obj.name)
          except errors.ReservationError:
            raise errors.OpPrereqError("IP %s not available in network %s" %
                                       (new_ip, new_net_obj.name),
                                       errors.ECODE_NOTUNIQUE)
        # new network is None so check if new IP is a conflicting IP
        elif self.op.conflicts_check:
          CheckForConflictingIp(self, new_ip, pnode_uuid)

      # release old IP if old network is not None
      if old_ip and old_net_uuid:
        try:
          self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
        except errors.AddressPoolError:
          logging.warning("Release IP %s not contained in network %s",
                          old_ip, old_net_obj.name)

    # there are no changes in (ip, network) tuple and old network is not None
    elif (old_net_uuid is not None and
          (req_link is not None or req_mode is not None)):
      raise errors.OpPrereqError("Not allowed to change link or mode of"
                                 " a NIC that is connected to a network",
                                 errors.ECODE_INVAL)

    private.params = new_params
    private.filled = new_filled_params

  def _PreCheckDiskTemplate(self, pnode_info):
    """CheckPrereq checks related to a new disk template."""
    # Arguments are passed to avoid configuration lookups
    pnode_uuid = self.instance.primary_node

    # TODO make sure heterogeneous disk types can be converted.
    disk_template = self.cfg.GetInstanceDiskTemplate(self.instance.uuid)
    if disk_template == constants.DT_MIXED:
      raise errors.OpPrereqError(
          "Conversion from mixed is not yet supported.")

    inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
    if utils.AnyDiskOfType(inst_disks, constants.DTS_NOT_CONVERTIBLE_FROM):
      raise errors.OpPrereqError(
          "Conversion from the '%s' disk template is not supported"
          % self.cfg.GetInstanceDiskTemplate(self.instance.uuid),
          errors.ECODE_INVAL)

    elif self.op.disk_template in constants.DTS_NOT_CONVERTIBLE_TO:
      raise errors.OpPrereqError("Conversion to the '%s' disk template is"
                                 " not supported" % self.op.disk_template,
                                 errors.ECODE_INVAL)

    if (self.op.disk_template != constants.DT_EXT and
        utils.AllDiskOfType(inst_disks, [self.op.disk_template])):
      raise errors.OpPrereqError("Instance already has disk template %s" %
                                 self.op.disk_template, errors.ECODE_INVAL)

    if not self.cluster.IsDiskTemplateEnabled(self.op.disk_template):
      enabled_dts = utils.CommaJoin(self.cluster.enabled_disk_templates)
      raise errors.OpPrereqError("Disk template '%s' is not enabled for this"
                                 " cluster (enabled templates: %s)" %
                                 (self.op.disk_template, enabled_dts),
                                 errors.ECODE_STATE)

    default_vg = self.cfg.GetVGName()
    if (not default_vg and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Disk template conversions to lvm-based"
                                 " instances are not supported by the cluster",
                                 errors.ECODE_STATE)

    CheckInstanceState(self, self.instance, INSTANCE_DOWN,
                       msg="cannot change disk template")

    # compute new disks' information
    self.disks_info = ComputeDisksInfo(inst_disks, self.op.disk_template,
                                       default_vg, self.op.ext_params)

    # mirror node verification
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.remote_node_uuid == pnode_uuid:
        raise errors.OpPrereqError("Given new secondary node %s is the same"
                                   " as the primary node of the instance" %
                                   self.op.remote_node, errors.ECODE_STATE)
      CheckNodeOnline(self, self.op.remote_node_uuid)
      CheckNodeNotDrained(self, self.op.remote_node_uuid)
      CheckNodeVmCapable(self, self.op.remote_node_uuid)

      snode_info = self.cfg.GetNodeInfo(self.op.remote_node_uuid)
      snode_group = self.cfg.GetNodeGroup(snode_info.group)
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                              snode_group)
      CheckTargetNodeIPolicy(self, ipolicy, self.instance, snode_info, self.cfg,
                             ignore=self.op.ignore_ipolicy)
      if pnode_info.group != snode_info.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    # check that the template is in the primary node group's allowed templates
    pnode_group = self.cfg.GetNodeGroup(pnode_info.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                            pnode_group)
    allowed_dts = ipolicy[constants.IPOLICY_DTS]
    if self.op.disk_template not in allowed_dts:
      raise errors.OpPrereqError("Disk template '%s' is not allowed (allowed"
                                 " templates: %s)" %
                                 (self.op.disk_template,
                                  utils.CommaJoin(allowed_dts)),
                                 errors.ECODE_STATE)

    if self.op.disk_template not in constants.DTS_EXCL_STORAGE:
      # Make sure none of the nodes require exclusive storage
      nodes = [pnode_info]
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        assert snode_info
        nodes.append(snode_info)
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
      if compat.any(map(has_es, nodes)):
        errmsg = ("Cannot convert disk template from %s to %s when exclusive"
                  " storage is enabled" % (
                      self.cfg.GetInstanceDiskTemplate(self.instance.uuid),
                      self.op.disk_template))
        raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)

    # TODO remove setting the disk template after DiskSetParams exists.
    # node capacity checks
    if (self.op.disk_template == constants.DT_PLAIN and
        utils.AllDiskOfType(inst_disks, [constants.DT_DRBD8])):
      # we ensure that no capacity checks will be made for conversions from
      # the 'drbd' to the 'plain' disk template
      pass
    elif (self.op.disk_template == constants.DT_DRBD8 and
          utils.AllDiskOfType(inst_disks, [constants.DT_PLAIN])):
      # for conversions from the 'plain' to the 'drbd' disk template, check
      # only the remote node's capacity
      req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks_info)
      CheckNodesFreeDiskPerVG(self, [self.op.remote_node_uuid], req_sizes)
    elif self.op.disk_template in constants.DTS_LVM:
      # rest lvm-based capacity checks
      node_uuids = [pnode_uuid]
      if self.op.remote_node_uuid:
        node_uuids.append(self.op.remote_node_uuid)
      req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks_info)
      CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes)
    elif self.op.disk_template == constants.DT_RBD:
      # CheckRADOSFreeSpace() is simply a placeholder
      CheckRADOSFreeSpace()
    elif self.op.disk_template == constants.DT_EXT:
      # FIXME: Capacity checks for extstorage template, if exists
      pass
    else:
      # FIXME: Checks about other non lvm-based disk templates
      pass

  def _PreCheckDisks(self, ispec):
    """CheckPrereq checks related to disk changes.

    @type ispec: dict
    @param ispec: instance specs to be updated with the new disks

    """
    self.diskparams = self.cfg.GetInstanceDiskParams(self.instance)

    inst_nodes = self.cfg.GetInstanceNodes(self.instance.uuid)
    excl_stor = compat.any(
      rpc.GetExclusiveStorageForNodes(self.cfg, inst_nodes).values()
      )

    # Get the group access type
    node_info = self.cfg.GetNodeInfo(self.instance.primary_node)
    node_group = self.cfg.GetNodeGroup(node_info.group)
    group_disk_params = self.cfg.GetGroupDiskParams(node_group)

    group_access_types = dict(
        (dt, group_disk_params[dt].get(
            constants.RBD_ACCESS, constants.DISK_KERNELSPACE))
        for dt in constants.DISK_TEMPLATES)

    # Check disk modifications. This is done here and not in CheckArguments
    # (as with NICs), because we need to know the instance's disk template
    ver_fn = lambda op, par: self._VerifyDiskModification(op, par, excl_stor,
                                                          group_access_types)
    # Don't enforce param types here in case it's an ext disk added. The check
    # happens inside _VerifyDiskModification.
    self._CheckMods("disk", self.op.disks, {}, ver_fn)

    self.diskmod = PrepareContainerMods(self.op.disks, None)

    def _PrepareDiskMod(_, disk, params, __):
      disk.name = params.get(constants.IDISK_NAME, None)

    # Verify disk changes (operating on a copy)
    inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
    disks = copy.deepcopy(inst_disks)
    ApplyContainerMods("disk", disks, None, self.diskmod, None, None,
                       _PrepareDiskMod, None, None)
    utils.ValidateDeviceNames("disk", disks)
    if len(disks) > constants.MAX_DISKS:
      raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
                                 " more" % constants.MAX_DISKS,
                                 errors.ECODE_STATE)
    disk_sizes = [disk.size for disk in inst_disks]
    disk_sizes.extend(params["size"] for (op, idx, params, private) in
                      self.diskmod if op == constants.DDM_ADD)
    ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
    ispec[constants.ISPEC_DISK_SIZE] = disk_sizes

    # either --online or --offline was passed
    if self.op.offline is not None:
      if self.op.offline:
        msg = "can't change to offline without being down first"
      else:
        msg = "can't change to online (down) without being offline first"
      CheckInstanceState(self, self.instance, INSTANCE_NOT_RUNNING,
                         msg=msg)

  @staticmethod
  def _InstanceCommunicationDDM(cfg, instance_communication, instance):
    """Create a NIC mod that adds or removes the instance
    communication NIC to a running instance.

    The NICS are dynamically created using the Dynamic Device
    Modification (DDM). This function produces a NIC modification
    (mod) that inserts an additional NIC meant for instance
    communication in or removes an existing instance communication NIC
    from a running instance, using DDM.

    @type cfg: L{config.ConfigWriter}
    @param cfg: cluster configuration

    @type instance_communication: boolean
    @param instance_communication: whether instance communication is
                                   enabled or disabled

    @type instance: L{objects.Instance}
    @param instance: instance to which the NIC mod will be applied to

    @rtype: (L{constants.DDM_ADD}, -1, parameters) or
            (L{constants.DDM_REMOVE}, -1, parameters) or
            L{None}
    @return: DDM mod containing an action to add or remove the NIC, or
             None if nothing needs to be done

    """
    nic_name = ComputeInstanceCommunicationNIC(instance.name)

    instance_communication_nic = None

    for nic in instance.nics:
      if nic.name == nic_name:
        instance_communication_nic = nic
        break

    if instance_communication and not instance_communication_nic:
      action = constants.DDM_ADD
      params = {constants.INIC_NAME: nic_name,
                constants.INIC_MAC: constants.VALUE_GENERATE,
                constants.INIC_IP: constants.NIC_IP_POOL,
                constants.INIC_NETWORK:
                  cfg.GetInstanceCommunicationNetwork()}
    elif not instance_communication and instance_communication_nic:
      action = constants.DDM_REMOVE
      params = None
    else:
      action = None
      params = None

    if action is not None:
      return (action, -1, params)
    else:
      return None

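  # For example, enabling instance communication on an instance that lacks
  # the special NIC yields a mod of the form (the NIC name is derived from
  # the instance name):
  #
  #   (constants.DDM_ADD, -1,
  #    {constants.INIC_NAME: nic_name,
  #     constants.INIC_MAC: constants.VALUE_GENERATE,
  #     constants.INIC_IP: constants.NIC_IP_POOL,
  #     constants.INIC_NETWORK: cfg.GetInstanceCommunicationNetwork()})
  #
  # while disabling it on an instance that has the NIC yields
  # (constants.DDM_REMOVE, -1, None).
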
  def _GetInstanceInfo(self, cluster_hvparams):
    pnode_uuid = self.instance.primary_node
    instance_info = self.rpc.call_instance_info(
        pnode_uuid, self.instance.name, self.instance.hypervisor,
        cluster_hvparams)
    return instance_info

  def _CheckHotplug(self):
    if self.op.hotplug or self.op.hotplug_if_possible:
      result = self.rpc.call_hotplug_supported(self.instance.primary_node,
                                               self.instance)
      if result.fail_msg:
        if self.op.hotplug:
          result.Raise("Hotplug is not possible: %s" % result.fail_msg,
                       prereq=True, ecode=errors.ECODE_STATE)
        else:
          self.LogWarning(result.fail_msg)
          self.op.hotplug = False
          self.LogInfo("Modification will take place without hotplugging.")
      else:
        self.op.hotplug = True

  def _PrepareNicCommunication(self):
    # add or remove NIC for instance communication
    if self.op.instance_communication is not None:
      mod = self._InstanceCommunicationDDM(self.cfg,
                                           self.op.instance_communication,
                                           self.instance)
      if mod is not None:
        self.op.nics.append(mod)

    self.nicmod = PrepareContainerMods(self.op.nics, InstNicModPrivate)

  def _ProcessHVParams(self, node_uuids):
    if self.op.hvparams:
      hv_type = self.instance.hypervisor
      i_hvdict = GetUpdatedParams(self.instance.hvparams, self.op.hvparams)
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
      hv_new = self.cluster.SimpleFillHV(hv_type, self.instance.os, i_hvdict)

      # local check
      hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
      CheckHVParams(self, node_uuids, self.instance.hypervisor, hv_new)
      self.hv_proposed = self.hv_new = hv_new  # the new actual values
      self.hv_inst = i_hvdict  # the new dict (without defaults)
    else:
      self.hv_proposed = self.cluster.SimpleFillHV(self.instance.hypervisor,
                                                   self.instance.os,
                                                   self.instance.hvparams)
      self.hv_new = self.hv_inst = {}

  def _ProcessBeParams(self):
    if self.op.beparams:
      i_bedict = GetUpdatedParams(self.instance.beparams, self.op.beparams,
                                  use_none=True)
      objects.UpgradeBeParams(i_bedict)
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
      be_new = self.cluster.SimpleFillBE(i_bedict)
      self.be_proposed = self.be_new = be_new  # the new actual values
      self.be_inst = i_bedict  # the new dict (without defaults)
    else:
      self.be_new = self.be_inst = {}
      self.be_proposed = self.cluster.SimpleFillBE(self.instance.beparams)
    return self.cluster.FillBE(self.instance)

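  # A sketch of the merge semantics assumed above (hypothetical values):
  # GetUpdatedParams keeps existing per-instance overrides, applies the new
  # ones, and drops a key whose new value is constants.VALUE_DEFAULT (or,
  # with use_none=True, a literal None), reverting it to the cluster default:
  #
  #   GetUpdatedParams({"maxmem": 1024}, {"vcpus": 2})
  #     -> {"maxmem": 1024, "vcpus": 2}
  #   GetUpdatedParams({"maxmem": 1024}, {"maxmem": constants.VALUE_DEFAULT})
  #     -> {}
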
936 def _ValidateCpuParams(self):
937 # CPU param validation -- checking every time a parameter is
938 # changed to cover all cases where either CPU mask or vcpus have
939 # changed
940 if (constants.BE_VCPUS in self.be_proposed and
941 constants.HV_CPU_MASK in self.hv_proposed):
942 cpu_list = \
943 utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
944 # Verify mask is consistent with number of vCPUs. Can skip this
945 # test if only 1 entry in the CPU mask, which means same mask
946 # is applied to all vCPUs.
947 if (len(cpu_list) > 1 and
948 len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
949 raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
950 " CPU mask [%s]" %
951 (self.be_proposed[constants.BE_VCPUS],
952 self.hv_proposed[constants.HV_CPU_MASK]),
953 errors.ECODE_INVAL)
954
955 # Only perform this test if a new CPU mask is given
956 if constants.HV_CPU_MASK in self.hv_new and cpu_list:
957 # Calculate the largest CPU number requested
958 max_requested_cpu = max(map(max, cpu_list))
959 # Check that all of the instance's nodes have enough physical CPUs to
960 # satisfy the requested CPU mask
961 hvspecs = [(self.instance.hypervisor,
962 self.cfg.GetClusterInfo()
963 .hvparams[self.instance.hypervisor])]
964 CheckNodesPhysicalCPUs(self,
965 self.cfg.GetInstanceNodes(self.instance.uuid),
966 max_requested_cpu + 1,
967 hvspecs)
968
969 def _ProcessOsParams(self, node_uuids):
970 # osparams processing
971 instance_os = (self.op.os_name
972 if self.op.os_name and not self.op.force
973 else self.instance.os)
974
975 if self.op.osparams or self.op.osparams_private:
976 public_parms = self.op.osparams or {}
977 private_parms = self.op.osparams_private or {}
978 dupe_keys = utils.GetRepeatedKeys(public_parms, private_parms)
979
980 if dupe_keys:
981 raise errors.OpPrereqError("OS parameters repeated multiple times: %s" %
982 utils.CommaJoin(dupe_keys))
983
984 self.os_inst = GetUpdatedParams(self.instance.osparams,
985 public_parms)
986 self.os_inst_private = GetUpdatedParams(self.instance.osparams_private,
987 private_parms)
988
989 CheckOSParams(self, True, node_uuids, instance_os,
990 objects.FillDict(self.os_inst,
991 self.os_inst_private),
992 self.op.force_variant)
993
994 else:
995 self.os_inst = {}
996 self.os_inst_private = {}
997
998 def _ProcessMem(self, cluster_hvparams, be_old, pnode_uuid):
999 #TODO(dynmem): do the appropriate check involving MINMEM
1000 if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
1001 self.be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
1002 mem_check_list = [pnode_uuid]
1003 if self.be_new[constants.BE_AUTO_BALANCE]:
1004 # either we changed auto_balance to yes or it was from before
1005 mem_check_list.extend(
1006 self.cfg.GetInstanceSecondaryNodes(self.instance.uuid))
1007 instance_info = self._GetInstanceInfo(cluster_hvparams)
1008 hvspecs = [(self.instance.hypervisor,
1009 cluster_hvparams)]
1010 nodeinfo = self.rpc.call_node_info(mem_check_list, None,
1011 hvspecs)
1012 pninfo = nodeinfo[pnode_uuid]
1013 msg = pninfo.fail_msg
1014 if msg:
1015 # Assume the primary node is unreachable and go ahead
1016 self.warn.append("Can't get info from primary node %s: %s" %
1017 (self.cfg.GetNodeName(pnode_uuid), msg))
1018 else:
1019 (_, _, (pnhvinfo, )) = pninfo.payload
1020 if not isinstance(pnhvinfo.get("memory_free", None), int):
1021 self.warn.append("Node data from primary node %s doesn't contain"
1022 " free memory information" %
1023 self.cfg.GetNodeName(pnode_uuid))
1024 elif instance_info.fail_msg:
1025 self.warn.append("Can't get instance runtime information: %s" %
1026 instance_info.fail_msg)
1027 else:
1028 if instance_info.payload:
1029 current_mem = int(instance_info.payload["memory"])
1030 else:
1031 # Assume instance not running
1032 # (there is a slight race condition here, but it's not very
1033 # probable, and we have no other way to check)
1034 # TODO: Describe race condition
1035 current_mem = 0
1036 #TODO(dynmem): do the appropriate check involving MINMEM
1037 miss_mem = (self.be_new[constants.BE_MAXMEM] - current_mem -
1038 pnhvinfo["memory_free"])
1039 if miss_mem > 0:
1040 raise errors.OpPrereqError("This change will prevent the instance"
1041 " from starting, due to %d MB of memory"
1042 " missing on its primary node" %
1043 miss_mem, errors.ECODE_NORES)
1044
1045 if self.be_new[constants.BE_AUTO_BALANCE]:
1046 secondary_nodes = \
1047 self.cfg.GetInstanceSecondaryNodes(self.instance.uuid)
1048 for node_uuid, nres in nodeinfo.items():
1049 if node_uuid not in secondary_nodes:
1050 continue
1051 nres.Raise("Can't get info from secondary node %s" %
1052 self.cfg.GetNodeName(node_uuid), prereq=True,
1053 ecode=errors.ECODE_STATE)
1054 (_, _, (nhvinfo, )) = nres.payload
1055 if not isinstance(nhvinfo.get("memory_free", None), int):
1056 raise errors.OpPrereqError("Secondary node %s didn't return free"
1057 " memory information" %
1058 self.cfg.GetNodeName(node_uuid),
1059 errors.ECODE_STATE)
1060 #TODO(dynmem): do the appropriate check involving MINMEM
1061 elif self.be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
1062 raise errors.OpPrereqError("This change will prevent the instance"
1063 " from failover to its secondary node"
1064 " %s, due to not enough memory" %
1065 self.cfg.GetNodeName(node_uuid),
1066 errors.ECODE_STATE)
1067
1068 if self.op.runtime_mem:
1069 remote_info = self.rpc.call_instance_info(
1070 self.instance.primary_node, self.instance.name,
1071 self.instance.hypervisor,
1072 cluster_hvparams)
1073 remote_info.Raise("Error checking node %s" %
1074 self.cfg.GetNodeName(self.instance.primary_node),
1075 prereq=True)
1076 if not remote_info.payload: # not running already
1077 raise errors.OpPrereqError("Instance %s is not running" %
1078 self.instance.name, errors.ECODE_STATE)
1079
1080 current_memory = remote_info.payload["memory"]
1081 if (not self.op.force and
1082 (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
1083 self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
1084 raise errors.OpPrereqError("Instance %s must have memory between %d"
1085 " and %d MB of memory unless --force is"
1086 " given" %
1087 (self.instance.name,
1088 self.be_proposed[constants.BE_MINMEM],
1089 self.be_proposed[constants.BE_MAXMEM]),
1090 errors.ECODE_INVAL)
1091
1092 delta = self.op.runtime_mem - current_memory
1093 if delta > 0:
1094 CheckNodeFreeMemory(
1095 self, self.instance.primary_node,
1096 "ballooning memory for instance %s" % self.instance.name, delta,
1097 self.instance.hypervisor,
1098 self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])
1099
1100 def CheckPrereq(self):
1101 """Check prerequisites.
1102
1103 This only checks the instance list against the existing names.
1104
1105 """
1106 assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
1107 self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1108 self.cluster = self.cfg.GetClusterInfo()
1109 cluster_hvparams = self.cluster.hvparams[self.instance.hypervisor]
1110
1111 self.op.disks = self._LookupDiskMods()
1112
1113 assert self.instance is not None, \
1114 "Cannot retrieve locked instance %s" % self.op.instance_name
1115
1116 self.warn = []
1117
1118 if (self.op.pnode_uuid is not None and
1119 self.op.pnode_uuid != self.instance.primary_node and
1120 not self.op.force):
1121 instance_info = self._GetInstanceInfo(cluster_hvparams)
1122
1123 if instance_info.fail_msg:
1124 self.warn.append("Can't get instance runtime information: %s" %
1125 instance_info.fail_msg)
1126 elif instance_info.payload:
1127 raise errors.OpPrereqError(
1128 "Instance is still running on %s" %
1129 self.cfg.GetNodeName(self.instance.primary_node),
1130 errors.ECODE_STATE)
1131 pnode_uuid = self.instance.primary_node
1132 assert pnode_uuid in self.owned_locks(locking.LEVEL_NODE)
1133
1134 node_uuids = list(self.cfg.GetInstanceNodes(self.instance.uuid))
1135 pnode_info = self.cfg.GetNodeInfo(pnode_uuid)
1136
1137 assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
1138 group_info = self.cfg.GetNodeGroup(pnode_info.group)
1139
1140 # dictionary with instance information after the modification
1141 ispec = {}
1142
1143 self._CheckHotplug()
1144
1145 self._PrepareNicCommunication()
1146
1147 # disks processing
1148 assert not (self.op.disk_template and self.op.disks), \
1149 "Can't modify disk template and apply disk changes at the same time"
1150
1151 if self.op.disk_template:
1152 self._PreCheckDiskTemplate(pnode_info)
1153
1154 self._PreCheckDisks(ispec)
1155
1156 self._ProcessHVParams(node_uuids)
1157 be_old = self._ProcessBeParams()
1158
1159 self._ValidateCpuParams()
1160 self._ProcessOsParams(node_uuids)
1161 self._ProcessMem(cluster_hvparams, be_old, pnode_uuid)
1162
1163 # make self.cluster visible in the functions below
1164 cluster = self.cluster
1165
1166 def _PrepareNicCreate(_, params, private):
1167 self._PrepareNicModification(params, private, None, None,
1168 {}, cluster, pnode_uuid)
1169 return (None, None)
1170
1171 def _PrepareNicAttach(_, __, ___):
1172 raise errors.OpPrereqError("Attach operation is not supported for NICs",
1173 errors.ECODE_INVAL)
1174
1175 def _PrepareNicMod(_, nic, params, private):
1176 self._PrepareNicModification(params, private, nic.ip, nic.network,
1177 nic.nicparams, cluster, pnode_uuid)
1178 return None
1179
1180 def _PrepareNicRemove(_, params, __):
1181 ip = params.ip
1182 net = params.network
1183 if net is not None and ip is not None:
1184 self.cfg.ReleaseIp(net, ip, self.proc.GetECId())
1185
1186 def _PrepareNicDetach(_, __, ___):
1187 raise errors.OpPrereqError("Detach operation is not supported for NICs",
1188 errors.ECODE_INVAL)
1189
1190 # Verify NIC changes (operating on copy)
1191 nics = [nic.Copy() for nic in self.instance.nics]
1192 ApplyContainerMods("NIC", nics, None, self.nicmod, _PrepareNicCreate,
1193 _PrepareNicAttach, _PrepareNicMod, _PrepareNicRemove,
1194 _PrepareNicDetach)
1195 if len(nics) > constants.MAX_NICS:
1196 raise errors.OpPrereqError("Instance has too many network interfaces"
1197 " (%d), cannot add more" % constants.MAX_NICS,
1198 errors.ECODE_STATE)
1199
1200 # Pre-compute NIC changes (necessary to use result in hooks)
1201 self._nic_chgdesc = []
1202 if self.nicmod:
1203 # Operate on copies as this is still in prereq
1204 nics = [nic.Copy() for nic in self.instance.nics]
1205 ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
1206 self._CreateNewNic, None, self._ApplyNicMods,
1207 self._RemoveNic, None)
1208 # Verify that NIC names are unique and valid
1209 utils.ValidateDeviceNames("NIC", nics)
1210 self._new_nics = nics
1211 ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
1212 else:
1213 self._new_nics = None
1214 ispec[constants.ISPEC_NIC_COUNT] = len(self.instance.nics)
1215
1216 if not self.op.ignore_ipolicy:
1217 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
1218 group_info)
1219
1220 # Fill ispec with backend parameters
1221 ispec[constants.ISPEC_SPINDLE_USE] = \
1222 self.be_new.get(constants.BE_SPINDLE_USE, None)
1223 ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
1224 None)
1225
1226 # Copy ispec to verify parameters with min/max values separately
1227 if self.op.disk_template:
1228 count = ispec[constants.ISPEC_DISK_COUNT]
1229 new_disk_types = [self.op.disk_template] * count
1230 else:
1231 old_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
1232 add_disk_count = ispec[constants.ISPEC_DISK_COUNT] - len(old_disks)
1233 dev_type = self.cfg.GetInstanceDiskTemplate(self.instance.uuid)
1234 if dev_type == constants.DT_DISKLESS and add_disk_count != 0:
1235 raise errors.ProgrammerError(
1236 "Conversion from diskless instance not possible and should have"
1237 " been caught")
1238
1239 new_disk_types = ([d.dev_type for d in old_disks] +
1240 [dev_type] * add_disk_count)
1241 ispec_max = ispec.copy()
1242 ispec_max[constants.ISPEC_MEM_SIZE] = \
1243 self.be_new.get(constants.BE_MAXMEM, None)
1244 res_max = ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
1245 new_disk_types)
1246 ispec_min = ispec.copy()
1247 ispec_min[constants.ISPEC_MEM_SIZE] = \
1248 self.be_new.get(constants.BE_MINMEM, None)
1249 res_min = ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
1250 new_disk_types)
1251
1252 if (res_max or res_min):
1253 # FIXME: Improve error message by including information about whether
1254 # the upper or lower limit of the parameter fails the ipolicy.
1255 msg = ("Instance allocation to group %s (%s) violates policy: %s" %
1256 (group_info, group_info.name,
1257 utils.CommaJoin(set(res_max + res_min))))
1258 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
1259
1260 def _ConvertInstanceDisks(self, feedback_fn):
1261 """Converts the disks of an instance to another type.
1262
1263 This function converts the disks of an instance. It supports
1264 conversions among all the available disk types except conversions
1265 between the LVM-based disk types, that use their separate code path.
1266 Also, this method does not support conversions that include the 'diskless'
1267 template and those targeting the 'blockdev' template.
1268
1269 @type feedback_fn: callable
1270 @param feedback_fn: function used to send feedback back to the caller
1271
1272 @rtype: NoneType
1273 @return: None
1274 @raise errors.OpPrereqError: in case of failure
1275
1276 """
1277 template_info = self.op.disk_template
1278 if self.op.disk_template == constants.DT_EXT:
1279 template_info = ":".join([self.op.disk_template,
1280 self.op.ext_params["provider"]])
1281
1282 old_template = self.cfg.GetInstanceDiskTemplate(self.instance.uuid)
1283 feedback_fn("Converting disk template from '%s' to '%s'" %
1284 (old_template, template_info))
1285
1286 assert not (old_template in constants.DTS_NOT_CONVERTIBLE_FROM or
1287 self.op.disk_template in constants.DTS_NOT_CONVERTIBLE_TO), \
1288 ("Unsupported disk template conversion from '%s' to '%s'" %
1289 (old_template, self.op.disk_template))
1290
1291 pnode_uuid = self.instance.primary_node
1292 snode_uuid = []
1293 if self.op.remote_node_uuid:
1294 snode_uuid = [self.op.remote_node_uuid]
1295
1296 old_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
1297
1298 feedback_fn("Generating new '%s' disk template..." % template_info)
1299 file_storage_dir = CalculateFileStorageDir(
1300 self.op.disk_template, self.cfg, self.instance.name,
1301 file_storage_dir=self.op.file_storage_dir)
1302 new_disks = GenerateDiskTemplate(self,
1303 self.op.disk_template,
1304 self.instance.uuid,
1305 pnode_uuid,
1306 snode_uuid,
1307 self.disks_info,
1308 file_storage_dir,
1309 self.op.file_driver,
1310 0,
1311 feedback_fn,
1312 self.diskparams)
1313
1314 # Create the new block devices for the instance.
1315 feedback_fn("Creating new empty disks of type '%s'..." % template_info)
1316 try:
1317 CreateDisks(self, self.instance, disk_template=self.op.disk_template,
1318 disks=new_disks)
1319 except errors.OpExecError:
1320 self.LogWarning("Device creation failed")
1321 for disk in new_disks:
1322 self.cfg.ReleaseDRBDMinors(disk.uuid)
1323 raise
1324
1325 # Transfer the data from the old to the newly created disks of the instance.
1326 feedback_fn("Populating the new empty disks of type '%s'..." %
1327 template_info)
1328 for idx, (old, new) in enumerate(zip(old_disks, new_disks)):
1329 feedback_fn(" - copying data from disk %s (%s), size %s" %
1330 (idx, old.dev_type,
1331 utils.FormatUnit(new.size, "h")))
1332 if old.dev_type == constants.DT_DRBD8:
1333 old = old.children[0]
1334 result = self.rpc.call_blockdev_convert(pnode_uuid, (old, self.instance),
1335 (new, self.instance))
1336 msg = result.fail_msg
1337 if msg:
1338 # A disk failed to copy. Abort the conversion operation and rollback
1339 # the modifications to the previous state. The instance will remain
1340 # intact.
1341 if self.op.disk_template == constants.DT_DRBD8:
1342 new = new.children[0]
1343 self.Log(" - ERROR: Could not copy disk '%s' to '%s'" %
1344 (old.logical_id[1], new.logical_id[1]))
1345 try:
1346 self.LogInfo("Some disks failed to copy")
1347 self.LogInfo("The instance will not be affected, aborting operation")
1348 self.LogInfo("Removing newly created disks of type '%s'..." %
1349 template_info)
1350 RemoveDisks(self, self.instance, disks=new_disks)
1351 self.LogInfo("Newly created disks removed successfully")
1352 finally:
1353 for disk in new_disks:
1354 self.cfg.ReleaseDRBDMinors(disk.uuid)
1355 result.Raise("Error while converting the instance's template")
1356
1357 # In case of DRBD disk, return its port to the pool
1358 for disk in old_disks:
1359 if disk.dev_type == constants.DT_DRBD8:
1360 tcp_port = disk.logical_id[2]
1361 self.cfg.AddTcpUdpPort(tcp_port)
1362
1363 # Remove old disks from the instance.
1364 feedback_fn("Detaching old disks (%s) from the instance and removing"
1365 " them from cluster config" % old_template)
1366 for old_disk in old_disks:
1367 self.cfg.RemoveInstanceDisk(self.instance.uuid, old_disk.uuid)
1368
1369 # Attach the new disks to the instance.
1370 feedback_fn("Adding new disks (%s) to cluster config and attaching"
1371 " them to the instance" % template_info)
1372 for (idx, new_disk) in enumerate(new_disks):
1373 self.cfg.AddInstanceDisk(self.instance.uuid, new_disk, idx=idx)
1374
1375 # Re-read the instance from the configuration.
1376 self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
1377
1378 # Release node locks while waiting for sync and disks removal.
1379 ReleaseLocks(self, locking.LEVEL_NODE)
1380
1381 disk_abort = not WaitForSync(self, self.instance,
1382 oneshot=not self.op.wait_for_sync)
1383 if disk_abort:
1384 raise errors.OpExecError("There are some degraded disks for"
1385 " this instance, please cleanup manually")
1386
1387 feedback_fn("Removing old block devices of type '%s'..." % old_template)
1388 RemoveDisks(self, self.instance, disks=old_disks)
1389
1390 # Node resource locks will be released by the caller.
1391
1392 def _ConvertPlainToDrbd(self, feedback_fn):
1393 """Converts an instance from plain to drbd.
1394
1395 """
1396 feedback_fn("Converting disk template from 'plain' to 'drbd'")
1397
1398 pnode_uuid = self.instance.primary_node
1399 snode_uuid = self.op.remote_node_uuid
1400 old_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
1401
1402 assert utils.AnyDiskOfType(old_disks, [constants.DT_PLAIN])
1403
1404 new_disks = GenerateDiskTemplate(self, self.op.disk_template,
1405 self.instance.uuid, pnode_uuid,
1406 [snode_uuid], self.disks_info,
1407 None, None, 0,
1408 feedback_fn, self.diskparams)
1409 anno_disks = rpc.AnnotateDiskParams(new_disks, self.diskparams)
1410 p_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, pnode_uuid)
1411 s_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, snode_uuid)
1412 info = GetInstanceInfoText(self.instance)
1413 feedback_fn("Creating additional volumes...")
1414 # first, create the missing data and meta devices
1415 for disk in anno_disks:
1416 # unfortunately this is... not too nice
1417 CreateSingleBlockDev(self, pnode_uuid, self.instance, disk.children[1],
1418 info, True, p_excl_stor)
1419 for child in disk.children:
1420 CreateSingleBlockDev(self, snode_uuid, self.instance, child, info, True,
1421 s_excl_stor)
1422 # at this stage, all new LVs have been created, we can rename the
1423 # old ones
1424 feedback_fn("Renaming original volumes...")
1425 rename_list = [(o, n.children[0].logical_id)
1426 for (o, n) in zip(old_disks, new_disks)]
1427 result = self.rpc.call_blockdev_rename(pnode_uuid, rename_list)
1428 result.Raise("Failed to rename original LVs")
1429
1430 feedback_fn("Initializing DRBD devices...")
1431 # all child devices are in place, we can now create the DRBD devices
1432 try:
1433 for disk in anno_disks:
1434 for (node_uuid, excl_stor) in [(pnode_uuid, p_excl_stor),
1435 (snode_uuid, s_excl_stor)]:
1436 f_create = node_uuid == pnode_uuid
1437 CreateSingleBlockDev(self, node_uuid, self.instance, disk, info,
1438 f_create, excl_stor)
1439 except errors.GenericError, e:
1440 feedback_fn("Initializing of DRBD devices failed;"
1441 " renaming back original volumes...")
1442 rename_back_list = [(n.children[0], o.logical_id)
1443 for (n, o) in zip(new_disks, old_disks)]
1444 result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list)
1445 result.Raise("Failed to rename LVs back after error %s" % str(e))
1446 raise
1447
1448 # Remove the old disks from the instance
1449 for old_disk in old_disks:
1450 self.cfg.RemoveInstanceDisk(self.instance.uuid, old_disk.uuid)
1451
1452 # Attach the new disks to the instance
1453 for (idx, new_disk) in enumerate(new_disks):
1454 self.cfg.AddInstanceDisk(self.instance.uuid, new_disk, idx=idx)
1455
1456 # re-read the instance from the configuration
1457 self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
1458
1459 # Release node locks while waiting for sync
1460 ReleaseLocks(self, locking.LEVEL_NODE)
1461
1462 # disks are created, waiting for sync
1463 disk_abort = not WaitForSync(self, self.instance,
1464 oneshot=not self.op.wait_for_sync)
1465 if disk_abort:
1466      raise errors.OpExecError("There are some degraded disks for"
1467                               " this instance, please clean up manually")
1468
1469 # Node resource locks will be released by caller
1470
1471 def _ConvertDrbdToPlain(self, feedback_fn):
1472 """Converts an instance from drbd to plain.
1473
1474 """
1475 secondary_nodes = self.cfg.GetInstanceSecondaryNodes(self.instance.uuid)
1476 disks = self.cfg.GetInstanceDisks(self.instance.uuid)
1477 assert len(secondary_nodes) == 1
1478 assert utils.AnyDiskOfType(disks, [constants.DT_DRBD8])
1479
1480 feedback_fn("Converting disk template from 'drbd' to 'plain'")
1481
1482 old_disks = AnnotateDiskParams(self.instance, disks, self.cfg)
1483 new_disks = [d.children[0] for d in disks]
1484
1485 # copy over size, mode and name and set the correct nodes
1486 for parent, child in zip(old_disks, new_disks):
1487 child.size = parent.size
1488 child.mode = parent.mode
1489 child.name = parent.name
1490 child.nodes = [self.instance.primary_node]
1491
1492    # these are DRBD disks; return their TCP ports to the pool
1493 for disk in old_disks:
1494 tcp_port = disk.logical_id[2]
1495 self.cfg.AddTcpUdpPort(tcp_port)
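    # (for DRBD8 the logical_id is (nodeA, nodeB, port, minorA, minorB,
    # secret), hence the TCP port at index 2)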
1496
1497 # Remove the old disks from the instance
1498 for old_disk in old_disks:
1499 self.cfg.RemoveInstanceDisk(self.instance.uuid, old_disk.uuid)
1500
1501 # Attach the new disks to the instance
1502 for (idx, new_disk) in enumerate(new_disks):
1503 self.cfg.AddInstanceDisk(self.instance.uuid, new_disk, idx=idx)
1504
1505 # re-read the instance from the configuration
1506 self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
1507
1508 # Release locks in case removing disks takes a while
1509 ReleaseLocks(self, locking.LEVEL_NODE)
1510
1511 feedback_fn("Removing volumes on the secondary node...")
1512 RemoveDisks(self, self.instance, disks=old_disks,
1513 target_node_uuid=secondary_nodes[0])
1514
1515 feedback_fn("Removing unneeded volumes on the primary node...")
1516 meta_disks = []
1517    for disk in old_disks:
1518      meta_disks.append(disk.children[1])
1519 RemoveDisks(self, self.instance, disks=meta_disks)
1520
1521 def _HotplugDevice(self, action, dev_type, device, extra, seq):
1522 self.LogInfo("Trying to hotplug device...")
1523 msg = "hotplug:"
1524 result = self.rpc.call_hotplug_device(self.instance.primary_node,
1525 self.instance, action, dev_type,
1526 (device, self.instance),
1527 extra, seq)
1528 if result.fail_msg:
1529 self.LogWarning("Could not hotplug device: %s" % result.fail_msg)
1530      self.LogInfo("Continuing execution...")
1531 msg += "failed"
1532 else:
1533 self.LogInfo("Hotplug done.")
1534 msg += "done"
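    # The resulting "hotplug:done"/"hotplug:failed" marker is appended
    # verbatim to the per-device change list shown to the user.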
1535 return msg
1536
1537 def _FillFileDriver(self):
1538 if not self.op.file_driver:
1539 self.op.file_driver = constants.FD_DEFAULT
1540 elif self.op.file_driver not in constants.FILE_DRIVER:
1541 raise errors.OpPrereqError("Invalid file driver name '%s'" %
1542 self.op.file_driver, errors.ECODE_INVAL)
1543
1544 def _GenerateDiskTemplateWrapper(self, idx, disk_type, params):
1545 file_path = CalculateFileStorageDir(
1546 disk_type, self.cfg, self.instance.name,
1547 file_storage_dir=self.op.file_storage_dir)
1548
1549 self._FillFileDriver()
1550
1551 secondary_nodes = self.cfg.GetInstanceSecondaryNodes(self.instance.uuid)
1552 return \
1553 GenerateDiskTemplate(self, disk_type, self.instance.uuid,
1554 self.instance.primary_node, secondary_nodes,
1555 [params], file_path, self.op.file_driver, idx,
1556 self.Log, self.diskparams)[0]
1557
1558 def _CreateNewDisk(self, idx, params, _):
1559 """Creates a new disk.
1560
1561 """
1562 # add a new disk
1563 disk_template = self.cfg.GetInstanceDiskTemplate(self.instance.uuid)
1564 disk = self._GenerateDiskTemplateWrapper(idx, disk_template,
1565 params)
1566 new_disks = CreateDisks(self, self.instance, disks=[disk])
1567 self.cfg.AddInstanceDisk(self.instance.uuid, disk, idx)
1568
1569 # re-read the instance from the configuration
1570 self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
1571
1572 if self.cluster.prealloc_wipe_disks:
1573 # Wipe new disk
1574 WipeOrCleanupDisks(self, self.instance,
1575 disks=[(idx, disk, 0)],
1576 cleanup=new_disks)
1577
1578 changes = [
1579 ("disk/%d" % idx,
1580 "add:size=%s,mode=%s" % (disk.size, disk.mode)),
1581 ]
1582 if self.op.hotplug:
1583 result = self.rpc.call_blockdev_assemble(self.instance.primary_node,
1584 (disk, self.instance),
1585 self.instance, True, idx)
1586 if result.fail_msg:
1587 changes.append(("disk/%d" % idx, "assemble:failed"))
1588 self.LogWarning("Can't assemble newly created disk %d: %s",
1589 idx, result.fail_msg)
1590 else:
1591 _, link_name, uri = result.payload
1592 msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
1593 constants.HOTPLUG_TARGET_DISK,
1594 disk, (link_name, uri), idx)
1595 changes.append(("disk/%d" % idx, msg))
1596
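    # Illustrative content of `changes` at this point (hypothetical values):
    #   [("disk/1", "add:size=1024,mode=rw"), ("disk/1", "hotplug:done")]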
1597 return (disk, changes)
1598
1599 def _PostAddDisk(self, _, disk):
1600 if not WaitForSync(self, self.instance, disks=[disk],
1601 oneshot=not self.op.wait_for_sync):
1602 raise errors.OpExecError("Failed to sync disks of %s" %
1603 self.instance.name)
1604
1605 # the disk is active at this point, so deactivate it if the instance disks
1606 # are supposed to be inactive
1607 if not self.instance.disks_active:
1608 ShutdownInstanceDisks(self, self.instance, disks=[disk])
1609
1610 def _AttachDisk(self, idx, params, _):
1611 """Attaches an existing disk to an instance.
1612
1613 """
1614 uuid = params.get("uuid", None)
1615 name = params.get(constants.IDISK_NAME, None)
1616
1617 disk = self.GenericGetDiskInfo(uuid, name)
1618
1619    # Rename the disk before attaching (if the disk is file-based)
1620    if disk.dev_type in constants.DTS_INSTANCE_DEPENDENT_PATH:
1621 # Add disk size/mode, else GenerateDiskTemplate will not work.
1622 params[constants.IDISK_SIZE] = disk.size
1623 params[constants.IDISK_MODE] = str(disk.mode)
1624 dummy_disk = self._GenerateDiskTemplateWrapper(idx, disk.dev_type, params)
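      # The dummy disk is never created; it exists only to compute the
      # logical_id (and thus the path) the disk should have under this
      # instance's storage directory.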
1625 new_logical_id = dummy_disk.logical_id
1626 result = self.rpc.call_blockdev_rename(self.instance.primary_node,
1627 [(disk, new_logical_id)])
1628 result.Raise("Failed before attach")
1629 self.cfg.SetDiskLogicalID(disk.uuid, new_logical_id)
1630 disk.logical_id = new_logical_id
1631
1632 # Attach disk to instance
1633 self.cfg.AttachInstanceDisk(self.instance.uuid, disk.uuid, idx)
1634
1635 # re-read the instance from the configuration
1636 self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
1637
1638 changes = [
1639 ("disk/%d" % idx,
1640 "attach:size=%s,mode=%s" % (disk.size, disk.mode)),
1641 ]
1642
1643 disks_ok, _, payloads = AssembleInstanceDisks(self, self.instance,
1644 disks=[disk])
1645 if not disks_ok:
1646 changes.append(("disk/%d" % idx, "assemble:failed"))
1647 return disk, changes
1648
1649 if self.op.hotplug:
1650 _, link_name, uri = payloads[0]
1651 msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
1652 constants.HOTPLUG_TARGET_DISK,
1653 disk, (link_name, uri), idx)
1654 changes.append(("disk/%d" % idx, msg))
1655
1656 return (disk, changes)
1657
1658 def _ModifyDisk(self, idx, disk, params, _):
1659 """Modifies a disk.
1660
1661 """
1662 changes = []
1663 if constants.IDISK_MODE in params:
1664 disk.mode = params.get(constants.IDISK_MODE)
1665 changes.append(("disk.mode/%d" % idx, disk.mode))
1666
1667 if constants.IDISK_NAME in params:
1668 disk.name = params.get(constants.IDISK_NAME)
1669 changes.append(("disk.name/%d" % idx, disk.name))
1670
1671    # Arbitrary provider-specific params may also be modified on DT_EXT disks
1672
1673 for key, value in params.iteritems():
1674 if (key not in constants.MODIFIABLE_IDISK_PARAMS and
1675 disk.dev_type == constants.DT_EXT):
1676 # stolen from GetUpdatedParams: default means reset/delete
1677 if value.lower() == constants.VALUE_DEFAULT:
1678 try:
1679 del disk.params[key]
1680 except KeyError:
1681 pass
1682 else:
1683 disk.params[key] = value
1684 changes.append(("disk.params:%s/%d" % (key, idx), value))
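    # E.g. for an ext disk an arbitrary provider key is stored in
    # disk.params, while passing the value "default" deletes it again.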
1685
1686 # Update disk object
1687 self.cfg.Update(disk, self.feedback_fn)
1688
1689 return changes
1690
1691 def _RemoveDisk(self, idx, root, _):
1692 """Removes a disk.
1693
1694 """
1695 hotmsg = ""
1696 if self.op.hotplug:
1697 hotmsg = self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
1698 constants.HOTPLUG_TARGET_DISK,
1699 root, None, idx)
1700 ShutdownInstanceDisks(self, self.instance, [root])
1701
1702 RemoveDisks(self, self.instance, disks=[root])
1703
1704 # if this is a DRBD disk, return its port to the pool
1705 if root.dev_type in constants.DTS_DRBD:
1706 self.cfg.AddTcpUdpPort(root.logical_id[2])
1707
1708 # Remove disk from config
1709 self.cfg.RemoveInstanceDisk(self.instance.uuid, root.uuid)
1710
1711 # re-read the instance from the configuration
1712 self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
1713
1714 return hotmsg
1715
1716 def _DetachDisk(self, idx, root, _):
1717 """Detaches a disk from an instance.
1718
1719 """
1720 hotmsg = ""
1721 if self.op.hotplug:
1722 hotmsg = self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
1723 constants.HOTPLUG_TARGET_DISK,
1724 root, None, idx)
1725
1726 # Always shutdown the disk before detaching.
1727 ShutdownInstanceDisks(self, self.instance, [root])
1728
1729 # Rename detached disk.
1730 #
1731 # Transform logical_id from:
1732 # <file_storage_dir>/<instance_name>/<disk_name>
1733 # to
1734 # <file_storage_dir>/<disk_name>
1735 if root.dev_type in (constants.DT_FILE, constants.DT_SHARED_FILE):
1736 file_driver = root.logical_id[0]
1737 instance_path, disk_name = os.path.split(root.logical_id[1])
1738 new_path = os.path.join(os.path.dirname(instance_path), disk_name)
1739 new_logical_id = (file_driver, new_path)
1740 result = self.rpc.call_blockdev_rename(self.instance.primary_node,
1741 [(root, new_logical_id)])
1742 result.Raise("Failed before detach")
1743 # Update logical_id
1744 self.cfg.SetDiskLogicalID(root.uuid, new_logical_id)
1745
1746 # Remove disk from config
1747 self.cfg.DetachInstanceDisk(self.instance.uuid, root.uuid)
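    # Unlike _RemoveDisk, detaching only unlinks the disk from the instance
    # in the configuration; the disk object and its data are preserved.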
1748
1749 # re-read the instance from the configuration
1750 self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
1751
1752 return hotmsg
1753
1754 def _CreateNewNic(self, idx, params, private):
1755 """Creates data structure for a new network interface.
1756
1757 """
1758 mac = params[constants.INIC_MAC]
1759 ip = params.get(constants.INIC_IP, None)
1760 net = params.get(constants.INIC_NETWORK, None)
1761 name = params.get(constants.INIC_NAME, None)
1762 net_uuid = self.cfg.LookupNetwork(net)
1763    # TODO: should "not private.filled" be handled? Can a NIC lack nicparams?
1764 nicparams = private.filled
1765 nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
1766 nicparams=nicparams)
1767 nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
1768
1769 changes = [
1770 ("nic.%d" % idx,
1771 "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
1772 (mac, ip, private.filled[constants.NIC_MODE],
1773 private.filled[constants.NIC_LINK], net)),
1774 ]
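    # e.g. ("nic.0", "add:mac=aa:00:00:12:34:56,ip=None,mode=bridged,link=br0,network=None")
    # (hypothetical values)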
1775
1776 if self.op.hotplug:
1777 msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
1778 constants.HOTPLUG_TARGET_NIC,
1779 nobj, None, idx)
1780 changes.append(("nic.%d" % idx, msg))
1781
1782 return (nobj, changes)
1783
1784 def _ApplyNicMods(self, idx, nic, params, private):
1785 """Modifies a network interface.
1786
1787 """
1788 changes = []
1789
1790 for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
1791 if key in params:
1792 changes.append(("nic.%s/%d" % (key, idx), params[key]))
1793 setattr(nic, key, params[key])
1794
1795 new_net = params.get(constants.INIC_NETWORK, nic.network)
1796 new_net_uuid = self.cfg.LookupNetwork(new_net)
1797 if new_net_uuid != nic.network:
1798 changes.append(("nic.network/%d" % idx, new_net))
1799 nic.network = new_net_uuid
1800
1801 if private.filled:
1802 nic.nicparams = private.filled
1803
1804 for (key, val) in nic.nicparams.items():
1805 changes.append(("nic.%s/%d" % (key, idx), val))
1806
1807 if self.op.hotplug:
1808 msg = self._HotplugDevice(constants.HOTPLUG_ACTION_MODIFY,
1809 constants.HOTPLUG_TARGET_NIC,
1810 nic, None, idx)
1811 changes.append(("nic/%d" % idx, msg))
1812
1813 return changes
1814
1815 def _RemoveNic(self, idx, nic, _):
1816 if self.op.hotplug:
1817 return self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
1818 constants.HOTPLUG_TARGET_NIC,
1819 nic, None, idx)
1820
1821 def Exec(self, feedback_fn):
1822 """Modifies an instance.
1823
1824    All parameters take effect only at the next restart of the instance
1825    (unless hotplugged).
1825
1826 """
1827 self.feedback_fn = feedback_fn
1828 # Process here the warnings from CheckPrereq, as we don't have a
1829 # feedback_fn there.
1830 # TODO: Replace with self.LogWarning
1831 for warn in self.warn:
1832 feedback_fn("WARNING: %s" % warn)
1833
1834 assert ((self.op.disk_template is None) ^
1835 bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
1836      "Node resource locks must be held iff the disk template is changed"
1837
1838 result = []
1839
1840 # New primary node
1841 if self.op.pnode_uuid:
1842 self.instance.primary_node = self.op.pnode_uuid
1843
1844 # runtime memory
1845 if self.op.runtime_mem:
1846 rpcres = self.rpc.call_instance_balloon_memory(self.instance.primary_node,
1847 self.instance,
1848 self.op.runtime_mem)
1849 rpcres.Raise("Cannot modify instance runtime memory")
1850 result.append(("runtime_memory", self.op.runtime_mem))
1851
1852 # Apply disk changes
1853 inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
1854 ApplyContainerMods("disk", inst_disks, result, self.diskmod,
1855 self._CreateNewDisk, self._AttachDisk, self._ModifyDisk,
1856 self._RemoveDisk, self._DetachDisk,
1857 post_add_fn=self._PostAddDisk)
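    # Each disk modification is dispatched to the matching callback above:
    # add -> _CreateNewDisk, attach -> _AttachDisk, modify -> _ModifyDisk,
    # remove -> _RemoveDisk, detach -> _DetachDisk.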
1858
1859 if self.op.disk_template:
1860 if __debug__:
1861 check_nodes = set(self.cfg.GetInstanceNodes(self.instance.uuid))
1862 if self.op.remote_node_uuid:
1863 check_nodes.add(self.op.remote_node_uuid)
1864 for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
1865 owned = self.owned_locks(level)
1866 assert not (check_nodes - owned), \
1867 ("Not owning the correct locks, owning %r, expected at least %r" %
1868 (owned, check_nodes))
1869
1870 r_shut = ShutdownInstanceDisks(self, self.instance)
1871 if not r_shut:
1872 raise errors.OpExecError("Cannot shutdown instance disks, unable to"
1873 " proceed with disk template conversion")
1874      # TODO: make heterogeneous conversions work
1875 mode = (self.cfg.GetInstanceDiskTemplate(self.instance.uuid),
1876 self.op.disk_template)
1877 try:
1878 if mode in self._DISK_CONVERSIONS:
1879 self._DISK_CONVERSIONS[mode](self, feedback_fn)
1880 else:
1881 self._ConvertInstanceDisks(feedback_fn)
1882 except:
1883 for disk in inst_disks:
1884 self.cfg.ReleaseDRBDMinors(disk.uuid)
1885 raise
1886 result.append(("disk_template", self.op.disk_template))
1887
1888 disk_info = self.cfg.GetInstanceDisks(self.instance.uuid)
1889 assert utils.AllDiskOfType(disk_info, [self.op.disk_template]), \
1890 ("Expected disk template '%s', found '%s'" %
1891 (self.op.disk_template,
1892 self.cfg.GetInstanceDiskTemplate(self.instance.uuid)))
1893
1894 # Release node and resource locks if there are any (they might already have
1895 # been released during disk conversion)
1896 ReleaseLocks(self, locking.LEVEL_NODE)
1897 ReleaseLocks(self, locking.LEVEL_NODE_RES)
1898
1899 # Apply NIC changes
1900 if self._new_nics is not None:
1901 self.instance.nics = self._new_nics
1902 result.extend(self._nic_chgdesc)
1903
1904 # hvparams changes
1905 if self.op.hvparams:
1906 self.instance.hvparams = self.hv_inst
1907 for key, val in self.op.hvparams.iteritems():
1908 result.append(("hv/%s" % key, val))
1909
1910 # beparams changes
1911 if self.op.beparams:
1912 self.instance.beparams = self.be_inst
1913 for key, val in self.op.beparams.iteritems():
1914 result.append(("be/%s" % key, val))
1915
1916 # OS change
1917 if self.op.os_name:
1918 self.instance.os = self.op.os_name
1919
1920 # osparams changes
1921 if self.op.osparams:
1922 self.instance.osparams = self.os_inst
1923 for key, val in self.op.osparams.iteritems():
1924 result.append(("os/%s" % key, val))
1925
1926 if self.op.osparams_private:
1927 self.instance.osparams_private = self.os_inst_private
1928 for key, val in self.op.osparams_private.iteritems():
1929 # Show the Private(...) blurb.
1930 result.append(("os_private/%s" % key, repr(val)))
1931
1932 self.cfg.Update(self.instance, feedback_fn, self.proc.GetECId())
1933
1934 if self.op.offline is None:
1935 # Ignore
1936 pass
1937 elif self.op.offline:
1938 # Mark instance as offline
1939 self.instance = self.cfg.MarkInstanceOffline(self.instance.uuid)
1940 result.append(("admin_state", constants.ADMINST_OFFLINE))
1941 else:
1942 # Mark instance as online, but stopped
1943 self.instance = self.cfg.MarkInstanceDown(self.instance.uuid)
1944 result.append(("admin_state", constants.ADMINST_DOWN))
1945
1946 UpdateMetadata(feedback_fn, self.rpc, self.instance)
1947
1948 assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
1949 self.owned_locks(locking.LEVEL_NODE)), \
1950 "All node locks should have been released by now"
1951
1952 return result
1953
1954 _DISK_CONVERSIONS = {
1955 (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
1956 (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
1957 }
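  # Conversions not listed above are handled by the generic
  # _ConvertInstanceDisks path in Exec.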