4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc.
7 # Redistribution and use in source and binary forms, with or without
8 # modification, are permitted provided that the following conditions are
11 # 1. Redistributions of source code must retain the above copyright notice,
12 # this list of conditions and the following disclaimer.
14 # 2. Redistributions in binary form must reproduce the above copyright
15 # notice, this list of conditions and the following disclaimer in the
16 # documentation and/or other materials provided with the distribution.
18 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
19 # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
20 # TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
21 # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
22 # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
23 # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
24 # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
25 # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
26 # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
27 # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28 # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 """Logical unit setting parameters of a single instance."""
36 from ganeti
import compat
37 from ganeti
import constants
38 from ganeti
import errors
40 from ganeti
import hypervisor
41 from ganeti
import locking
42 from ganeti
import netutils
43 from ganeti
import objects
44 from ganeti
import utils
45 import ganeti
.rpc
.node
as rpc
47 from ganeti
.cmdlib
.base
import LogicalUnit
49 from ganeti
.cmdlib
.common
import INSTANCE_DOWN
, \
50 INSTANCE_NOT_RUNNING
, CheckNodeOnline
, \
51 CheckParamsNotGlobal
, \
52 IsExclusiveStorageEnabledNode
, CheckHVParams
, CheckOSParams
, \
53 GetUpdatedParams
, CheckInstanceState
, ExpandNodeUuidAndName
, \
54 IsValidDiskAccessModeCombination
, AnnotateDiskParams
55 from ganeti
.cmdlib
.instance_storage
import CalculateFileStorageDir
, \
56 CheckDiskExtProvider
, CheckNodesFreeDiskPerVG
, CheckRADOSFreeSpace
, \
57 CheckSpindlesExclusiveStorage
, ComputeDiskSizePerVG
, ComputeDisksInfo
, \
58 CreateDisks
, CreateSingleBlockDev
, GenerateDiskTemplate
, \
59 IsExclusiveStorageEnabledNodeUuid
, ShutdownInstanceDisks
, \
60 WaitForSync
, WipeOrCleanupDisks
, AssembleInstanceDisks
61 from ganeti
.cmdlib
.instance_utils
import BuildInstanceHookEnvByObject
, \
62 NICToTuple
, CheckNodeNotDrained
, CopyLockList
, \
63 ReleaseLocks
, CheckNodeVmCapable
, CheckTargetNodeIPolicy
, \
64 GetInstanceInfoText
, RemoveDisks
, CheckNodeFreeMemory
, \
65 UpdateMetadata
, CheckForConflictingIp
, \
66 PrepareContainerMods
, ComputeInstanceCommunicationNIC
, \
67 ApplyContainerMods
, ComputeIPolicyInstanceSpecViolation
, \
68 CheckNodesPhysicalCPUs
69 import ganeti
.masterd
.instance
72 class InstNicModPrivate(object):
73 """Data structure for network interface modifications.
75 Used by L{LUInstanceSetParams}.
83 class LUInstanceSetParams(LogicalUnit
):
84 """Modifies an instances's parameters.
87 HPATH
= "instance-modify"
88 HTYPE
= constants
.HTYPE_INSTANCE
91 def GenericGetDiskInfo(self
, uuid
=None, name
=None):
92 """Find a disk object using the provided params.
94 Accept arguments as keywords and use the GetDiskInfo/GetDiskInfoByName
95 config functions to retrieve the disk info based on these arguments.
97 In case of an error, raise the appropriate exceptions.
100 disk
= self
.cfg
.GetDiskInfo(uuid
)
102 raise errors
.OpPrereqError("No disk was found with this UUID: %s" %
103 uuid
, errors
.ECODE_INVAL
)
105 disk
= self
.cfg
.GetDiskInfoByName(name
)
107 raise errors
.OpPrereqError("No disk was found with this name: %s" %
108 name
, errors
.ECODE_INVAL
)
110 raise errors
.ProgrammerError("No disk UUID or name was given")
115 def _UpgradeDiskNicMods(kind
, mods
, verify_fn
):
116 assert ht
.TList(mods
)
117 assert not mods
or len(mods
[0]) in (2, 3)
119 if mods
and len(mods
[0]) == 2:
123 for op
, params
in mods
:
124 if op
in (constants
.DDM_ADD
, constants
.DDM_ATTACH
,
125 constants
.DDM_REMOVE
, constants
.DDM_DETACH
):
126 result
.append((op
, -1, params
))
130 raise errors
.OpPrereqError("Only one %s add/attach/remove/detach "
131 "operation is supported at a time" %
132 kind
, errors
.ECODE_INVAL
)
134 result
.append((constants
.DDM_MODIFY
, op
, params
))
136 assert verify_fn(result
)
142 def _CheckMods(kind
, mods
, key_types
, item_fn
):
143 """Ensures requested disk/NIC modifications are valid.
145 Note that the 'attach' action needs a way to refer to the UUID of the disk,
146 since the disk name is not unique cluster-wide. However, the UUID of the
147 disk is not settable but rather generated by Ganeti automatically,
148 therefore it cannot be passed as an IDISK parameter. For this reason, this
149 function will override the checks to accept uuid parameters solely for the
152 # Create a key_types copy with the 'uuid' as a valid key type.
153 key_types_attach
= key_types
.copy()
154 key_types_attach
['uuid'] = 'string'
156 for (op
, _
, params
) in mods
:
157 assert ht
.TDict(params
)
159 # If 'key_types' is an empty dict, we assume we have an
160 # 'ext' template and thus do not ForceDictType
162 utils
.ForceDictType(params
, (key_types
if op
!= constants
.DDM_ATTACH
163 else key_types_attach
))
165 if op
in (constants
.DDM_REMOVE
, constants
.DDM_DETACH
):
167 raise errors
.OpPrereqError("No settings should be passed when"
168 " removing or detaching a %s" % kind
,
170 elif op
in (constants
.DDM_ADD
, constants
.DDM_ATTACH
,
171 constants
.DDM_MODIFY
):
174 raise errors
.ProgrammerError("Unhandled operation '%s'" % op
)
176 def _VerifyDiskModification(self
, op
, params
, excl_stor
, group_access_types
):
177 """Verifies a disk modification.
180 disk_type
= params
.get(
181 constants
.IDISK_TYPE
,
182 self
.cfg
.GetInstanceDiskTemplate(self
.instance
.uuid
))
184 if op
== constants
.DDM_ADD
:
185 params
[constants
.IDISK_TYPE
] = disk_type
187 if disk_type
== constants
.DT_DISKLESS
:
188 raise errors
.OpPrereqError(
189 "Must specify disk type on diskless instance", errors
.ECODE_INVAL
)
191 if disk_type
!= constants
.DT_EXT
:
192 utils
.ForceDictType(params
, constants
.IDISK_PARAMS_TYPES
)
194 mode
= params
.setdefault(constants
.IDISK_MODE
, constants
.DISK_RDWR
)
195 if mode
not in constants
.DISK_ACCESS_SET
:
196 raise errors
.OpPrereqError("Invalid disk access mode '%s'" % mode
,
199 size
= params
.get(constants
.IDISK_SIZE
, None)
201 raise errors
.OpPrereqError("Required disk parameter '%s' missing" %
202 constants
.IDISK_SIZE
, errors
.ECODE_INVAL
)
205 params
[constants
.IDISK_SIZE
] = size
206 name
= params
.get(constants
.IDISK_NAME
, None)
207 if name
is not None and name
.lower() == constants
.VALUE_NONE
:
208 params
[constants
.IDISK_NAME
] = None
210 # This check is necessary both when adding and attaching disks
211 if op
in (constants
.DDM_ADD
, constants
.DDM_ATTACH
):
212 CheckSpindlesExclusiveStorage(params
, excl_stor
, True)
213 CheckDiskExtProvider(params
, disk_type
)
215 # Make sure we do not add syncing disks to instances with inactive disks
216 if not self
.op
.wait_for_sync
and not self
.instance
.disks_active
:
217 raise errors
.OpPrereqError("Can't %s a disk to an instance with"
218 " deactivated disks and --no-wait-for-sync"
219 " given" % op
, errors
.ECODE_INVAL
)
221 # Check disk access param (only for specific disks)
222 if disk_type
in constants
.DTS_HAVE_ACCESS
:
223 access_type
= params
.get(constants
.IDISK_ACCESS
,
224 group_access_types
[disk_type
])
225 if not IsValidDiskAccessModeCombination(self
.instance
.hypervisor
,
226 disk_type
, access_type
):
227 raise errors
.OpPrereqError("Selected hypervisor (%s) cannot be"
228 " used with %s disk access param" %
229 (self
.instance
.hypervisor
, access_type
),
232 if op
== constants
.DDM_ATTACH
:
233 if len(params
) != 1 or ('uuid' not in params
and
234 constants
.IDISK_NAME
not in params
):
235 raise errors
.OpPrereqError("Only one argument is permitted in %s op,"
236 " either %s or uuid" % (constants
.DDM_ATTACH
,
237 constants
.IDISK_NAME
,
240 self
._CheckAttachDisk(params
)
242 elif op
== constants
.DDM_MODIFY
:
243 if constants
.IDISK_SIZE
in params
:
244 raise errors
.OpPrereqError("Disk size change not possible, use"
245 " grow-disk", errors
.ECODE_INVAL
)
247 disk_info
= self
.cfg
.GetInstanceDisks(self
.instance
.uuid
)
249 # Disk modification supports changing only the disk name and mode.
250 # Changing arbitrary parameters is allowed only for ext disk template",
251 if not utils
.AllDiskOfType(disk_info
, [constants
.DT_EXT
]):
252 utils
.ForceDictType(params
, constants
.MODIFIABLE_IDISK_PARAMS_TYPES
)
254 # We have to check that the 'access' and 'disk_provider' parameters
256 for param
in [constants
.IDISK_ACCESS
, constants
.IDISK_PROVIDER
]:
258 raise errors
.OpPrereqError("Disk '%s' parameter change is"
259 " not possible" % param
,
262 name
= params
.get(constants
.IDISK_NAME
, None)
263 if name
is not None and name
.lower() == constants
.VALUE_NONE
:
264 params
[constants
.IDISK_NAME
] = None
267 def _VerifyNicModification(op
, params
):
268 """Verifies a network interface modification.
271 if op
in (constants
.DDM_ADD
, constants
.DDM_MODIFY
):
272 ip
= params
.get(constants
.INIC_IP
, None)
273 name
= params
.get(constants
.INIC_NAME
, None)
274 req_net
= params
.get(constants
.INIC_NETWORK
, None)
275 link
= params
.get(constants
.NIC_LINK
, None)
276 mode
= params
.get(constants
.NIC_MODE
, None)
277 if name
is not None and name
.lower() == constants
.VALUE_NONE
:
278 params
[constants
.INIC_NAME
] = None
279 if req_net
is not None:
280 if req_net
.lower() == constants
.VALUE_NONE
:
281 params
[constants
.INIC_NETWORK
] = None
283 elif link
is not None or mode
is not None:
284 raise errors
.OpPrereqError("If network is given"
285 " mode or link should not",
288 if op
== constants
.DDM_ADD
:
289 macaddr
= params
.get(constants
.INIC_MAC
, None)
291 params
[constants
.INIC_MAC
] = constants
.VALUE_AUTO
294 if ip
.lower() == constants
.VALUE_NONE
:
295 params
[constants
.INIC_IP
] = None
297 if ip
.lower() == constants
.NIC_IP_POOL
:
298 if op
== constants
.DDM_ADD
and req_net
is None:
299 raise errors
.OpPrereqError("If ip=pool, parameter network"
303 if not netutils
.IPAddress
.IsValid(ip
):
304 raise errors
.OpPrereqError("Invalid IP address '%s'" % ip
,
307 if constants
.INIC_MAC
in params
:
308 macaddr
= params
[constants
.INIC_MAC
]
309 if macaddr
not in (constants
.VALUE_AUTO
, constants
.VALUE_GENERATE
):
310 macaddr
= utils
.NormalizeAndValidateMac(macaddr
)
312 if op
== constants
.DDM_MODIFY
and macaddr
== constants
.VALUE_AUTO
:
313 raise errors
.OpPrereqError("'auto' is not a valid MAC address when"
314 " modifying an existing NIC",
317 def _LookupDiskIndex(self
, idx
):
318 """Looks up uuid or name of disk if necessary."""
323 for i
, d
in enumerate(self
.cfg
.GetInstanceDisks(self
.instance
.uuid
)):
324 if d
.name
== idx
or d
.uuid
== idx
:
326 raise errors
.OpPrereqError("Lookup of disk %r failed" % idx
)
328 def _LookupDiskMods(self
):
329 """Looks up uuid or name of disk if necessary."""
330 return [(op
, self
._LookupDiskIndex(idx
), params
)
331 for op
, idx
, params
in self
.op
.disks
]
333 def CheckArguments(self
):
334 if not (self
.op
.nics
or self
.op
.disks
or self
.op
.disk_template
or
335 self
.op
.hvparams
or self
.op
.beparams
or self
.op
.os_name
or
336 self
.op
.osparams
or self
.op
.offline
is not None or
337 self
.op
.runtime_mem
or self
.op
.pnode
or self
.op
.osparams_private
or
338 self
.op
.instance_communication
is not None):
339 raise errors
.OpPrereqError("No changes submitted", errors
.ECODE_INVAL
)
342 CheckParamsNotGlobal(self
.op
.hvparams
, constants
.HVC_GLOBALS
,
343 "hypervisor", "instance", "cluster")
345 self
.op
.disks
= self
._UpgradeDiskNicMods(
346 "disk", self
.op
.disks
,
347 ht
.TSetParamsMods(ht
.TIDiskParams
))
348 self
.op
.nics
= self
._UpgradeDiskNicMods(
349 "NIC", self
.op
.nics
, ht
.TSetParamsMods(ht
.TINicParams
))
351 # Check disk template modifications
352 if self
.op
.disk_template
:
354 raise errors
.OpPrereqError("Disk template conversion and other disk"
355 " changes not supported at the same time",
358 # mirrored template node checks
359 if self
.op
.disk_template
in constants
.DTS_INT_MIRROR
:
360 if not self
.op
.remote_node
:
361 raise errors
.OpPrereqError("Changing the disk template to a mirrored"
362 " one requires specifying a secondary"
363 " node", errors
.ECODE_INVAL
)
364 elif self
.op
.remote_node
:
365 self
.LogWarning("Changing the disk template to a non-mirrored one,"
366 " the secondary node will be ignored")
367 # the secondary node must be cleared in order to be ignored, otherwise
368 # the operation will fail, in the GenerateDiskTemplate method
369 self
.op
.remote_node
= None
371 # file-based template checks
372 if self
.op
.disk_template
in constants
.DTS_FILEBASED
:
373 self
._FillFileDriver()
375 # Check NIC modifications
376 self
._CheckMods("NIC", self
.op
.nics
, constants
.INIC_PARAMS_TYPES
,
377 self
._VerifyNicModification
)
380 (self
.op
.pnode_uuid
, self
.op
.pnode
) = \
381 ExpandNodeUuidAndName(self
.cfg
, self
.op
.pnode_uuid
, self
.op
.pnode
)
383 def _CheckAttachDisk(self
, params
):
384 """Check if disk can be attached to an instance.
386 Check if the disk and instance have the same template. Also, check if the
387 disk nodes are visible from the instance.
389 uuid
= params
.get("uuid", None)
390 name
= params
.get(constants
.IDISK_NAME
, None)
392 disk
= self
.GenericGetDiskInfo(uuid
, name
)
393 instance_template
= self
.cfg
.GetInstanceDiskTemplate(self
.instance
.uuid
)
394 if (disk
.dev_type
!= instance_template
and
395 instance_template
!= constants
.DT_DISKLESS
):
396 raise errors
.OpPrereqError("Instance has '%s' template while disk has"
398 (instance_template
, disk
.dev_type
),
401 instance_nodes
= self
.cfg
.GetInstanceNodes(self
.instance
.uuid
)
402 # Make sure we do not attach disks to instances on wrong nodes. If the
403 # instance is diskless, that instance is associated only to the primary
404 # node, whereas the disk can be associated to two nodes in the case of DRBD,
405 # hence, we have a subset check here.
406 if disk
.nodes
and not set(instance_nodes
).issubset(set(disk
.nodes
)):
407 raise errors
.OpPrereqError("Disk nodes are %s while the instance's nodes"
409 (disk
.nodes
, instance_nodes
),
411 # Make sure a DRBD disk has the same primary node as the instance where it
412 # will be attached to.
413 disk_primary
= disk
.GetPrimaryNode(self
.instance
.primary_node
)
414 if self
.instance
.primary_node
!= disk_primary
:
415 raise errors
.OpExecError("The disks' primary node is %s whereas the "
416 "instance's primary node is %s."
417 % (disk_primary
, self
.instance
.primary_node
))
419 def ExpandNames(self
):
420 self
._ExpandAndLockInstance()
421 self
.needed_locks
[locking
.LEVEL_NODEGROUP
] = []
422 # Can't even acquire node locks in shared mode as upcoming changes in
423 # Ganeti 2.6 will start to modify the node object on disk conversion
424 self
.needed_locks
[locking
.LEVEL_NODE
] = []
425 self
.needed_locks
[locking
.LEVEL_NODE_RES
] = []
426 self
.recalculate_locks
[locking
.LEVEL_NODE
] = constants
.LOCKS_REPLACE
427 # Look node group to look up the ipolicy
428 self
.share_locks
[locking
.LEVEL_NODEGROUP
] = 1
429 self
.dont_collate_locks
[locking
.LEVEL_NODEGROUP
] = True
430 self
.dont_collate_locks
[locking
.LEVEL_NODE
] = True
431 self
.dont_collate_locks
[locking
.LEVEL_NODE_RES
] = True
433 def DeclareLocks(self
, level
):
434 if level
== locking
.LEVEL_NODEGROUP
:
435 assert not self
.needed_locks
[locking
.LEVEL_NODEGROUP
]
436 # Acquire locks for the instance's nodegroups optimistically. Needs
437 # to be verified in CheckPrereq
438 self
.needed_locks
[locking
.LEVEL_NODEGROUP
] = \
439 self
.cfg
.GetInstanceNodeGroups(self
.op
.instance_uuid
)
440 elif level
== locking
.LEVEL_NODE
:
441 self
._LockInstancesNodes()
442 if self
.op
.disk_template
and self
.op
.remote_node
:
443 (self
.op
.remote_node_uuid
, self
.op
.remote_node
) = \
444 ExpandNodeUuidAndName(self
.cfg
, self
.op
.remote_node_uuid
,
446 self
.needed_locks
[locking
.LEVEL_NODE
].append(self
.op
.remote_node_uuid
)
447 elif level
== locking
.LEVEL_NODE_RES
and self
.op
.disk_template
:
449 self
.needed_locks
[locking
.LEVEL_NODE_RES
] = \
450 CopyLockList(self
.needed_locks
[locking
.LEVEL_NODE
])
452 def BuildHooksEnv(self
):
455 This runs on the master, primary and secondaries.
459 if constants
.BE_MINMEM
in self
.be_new
:
460 args
["minmem"] = self
.be_new
[constants
.BE_MINMEM
]
461 if constants
.BE_MAXMEM
in self
.be_new
:
462 args
["maxmem"] = self
.be_new
[constants
.BE_MAXMEM
]
463 if constants
.BE_VCPUS
in self
.be_new
:
464 args
["vcpus"] = self
.be_new
[constants
.BE_VCPUS
]
465 # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
466 # information at all.
468 if self
._new_nics
is not None:
471 for nic
in self
._new_nics
:
472 n
= copy
.deepcopy(nic
)
473 nicparams
= self
.cluster
.SimpleFillNIC(n
.nicparams
)
474 n
.nicparams
= nicparams
475 nics
.append(NICToTuple(self
, n
))
479 env
= BuildInstanceHookEnvByObject(self
, self
.instance
, override
=args
)
480 if self
.op
.disk_template
:
481 env
["NEW_DISK_TEMPLATE"] = self
.op
.disk_template
482 if self
.op
.runtime_mem
:
483 env
["RUNTIME_MEMORY"] = self
.op
.runtime_mem
487 def BuildHooksNodes(self
):
488 """Build hooks nodes.
491 nl
= [self
.cfg
.GetMasterNode()] + \
492 list(self
.cfg
.GetInstanceNodes(self
.instance
.uuid
))
495 def _PrepareNicModification(self
, params
, private
, old_ip
, old_net_uuid
,
496 old_params
, cluster
, pnode_uuid
):
498 update_params_dict
= dict([(key
, params
[key
])
499 for key
in constants
.NICS_PARAMETERS
502 req_link
= update_params_dict
.get(constants
.NIC_LINK
, None)
503 req_mode
= update_params_dict
.get(constants
.NIC_MODE
, None)
506 new_net_uuid_or_name
= params
.get(constants
.INIC_NETWORK
, old_net_uuid
)
507 if new_net_uuid_or_name
:
508 new_net_uuid
= self
.cfg
.LookupNetwork(new_net_uuid_or_name
)
509 new_net_obj
= self
.cfg
.GetNetwork(new_net_uuid
)
512 old_net_obj
= self
.cfg
.GetNetwork(old_net_uuid
)
515 netparams
= self
.cfg
.GetGroupNetParams(new_net_uuid
, pnode_uuid
)
517 raise errors
.OpPrereqError("No netparams found for the network"
518 " %s, probably not connected" %
519 new_net_obj
.name
, errors
.ECODE_INVAL
)
520 new_params
= dict(netparams
)
522 new_params
= GetUpdatedParams(old_params
, update_params_dict
)
524 utils
.ForceDictType(new_params
, constants
.NICS_PARAMETER_TYPES
)
526 new_filled_params
= cluster
.SimpleFillNIC(new_params
)
527 objects
.NIC
.CheckParameterSyntax(new_filled_params
)
529 new_mode
= new_filled_params
[constants
.NIC_MODE
]
530 if new_mode
== constants
.NIC_MODE_BRIDGED
:
531 bridge
= new_filled_params
[constants
.NIC_LINK
]
532 msg
= self
.rpc
.call_bridges_exist(pnode_uuid
, [bridge
]).fail_msg
534 msg
= "Error checking bridges on node '%s': %s" % \
535 (self
.cfg
.GetNodeName(pnode_uuid
), msg
)
537 self
.warn
.append(msg
)
539 raise errors
.OpPrereqError(msg
, errors
.ECODE_ENVIRON
)
541 elif new_mode
== constants
.NIC_MODE_ROUTED
:
542 ip
= params
.get(constants
.INIC_IP
, old_ip
)
543 if ip
is None and not new_net_uuid
:
544 raise errors
.OpPrereqError("Cannot set the NIC IP address to None"
545 " on a routed NIC if not attached to a"
546 " network", errors
.ECODE_INVAL
)
548 elif new_mode
== constants
.NIC_MODE_OVS
:
549 # TODO: check OVS link
550 self
.LogInfo("OVS links are currently not checked for correctness")
552 if constants
.INIC_MAC
in params
:
553 mac
= params
[constants
.INIC_MAC
]
555 raise errors
.OpPrereqError("Cannot unset the NIC MAC address",
557 elif mac
in (constants
.VALUE_AUTO
, constants
.VALUE_GENERATE
):
558 # otherwise generate the MAC address
559 params
[constants
.INIC_MAC
] = \
560 self
.cfg
.GenerateMAC(new_net_uuid
, self
.proc
.GetECId())
562 # or validate/reserve the current one
564 self
.cfg
.ReserveMAC(mac
, self
.proc
.GetECId())
565 except errors
.ReservationError
:
566 raise errors
.OpPrereqError("MAC address '%s' already in use"
568 errors
.ECODE_NOTUNIQUE
)
569 elif new_net_uuid
!= old_net_uuid
:
571 def get_net_prefix(net_uuid
):
574 nobj
= self
.cfg
.GetNetwork(net_uuid
)
575 mac_prefix
= nobj
.mac_prefix
579 new_prefix
= get_net_prefix(new_net_uuid
)
580 old_prefix
= get_net_prefix(old_net_uuid
)
581 if old_prefix
!= new_prefix
:
582 params
[constants
.INIC_MAC
] = \
583 self
.cfg
.GenerateMAC(new_net_uuid
, self
.proc
.GetECId())
585 # if there is a change in (ip, network) tuple
586 new_ip
= params
.get(constants
.INIC_IP
, old_ip
)
587 if (new_ip
, new_net_uuid
) != (old_ip
, old_net_uuid
):
589 # if IP is pool then require a network and generate one IP
590 if new_ip
.lower() == constants
.NIC_IP_POOL
:
593 new_ip
= self
.cfg
.GenerateIp(new_net_uuid
, self
.proc
.GetECId())
594 except errors
.ReservationError
:
595 raise errors
.OpPrereqError("Unable to get a free IP"
596 " from the address pool",
598 self
.LogInfo("Chose IP %s from network %s",
601 params
[constants
.INIC_IP
] = new_ip
603 raise errors
.OpPrereqError("ip=pool, but no network found",
605 # Reserve new IP if in the new network if any
608 self
.cfg
.ReserveIp(new_net_uuid
, new_ip
, self
.proc
.GetECId(),
609 check
=self
.op
.conflicts_check
)
610 self
.LogInfo("Reserving IP %s in network %s",
611 new_ip
, new_net_obj
.name
)
612 except errors
.ReservationError
:
613 raise errors
.OpPrereqError("IP %s not available in network %s" %
614 (new_ip
, new_net_obj
.name
),
615 errors
.ECODE_NOTUNIQUE
)
616 # new network is None so check if new IP is a conflicting IP
617 elif self
.op
.conflicts_check
:
618 CheckForConflictingIp(self
, new_ip
, pnode_uuid
)
620 # release old IP if old network is not None
621 if old_ip
and old_net_uuid
:
623 self
.cfg
.ReleaseIp(old_net_uuid
, old_ip
, self
.proc
.GetECId())
624 except errors
.AddressPoolError
:
625 logging
.warning("Release IP %s not contained in network %s",
626 old_ip
, old_net_obj
.name
)
628 # there are no changes in (ip, network) tuple and old network is not None
629 elif (old_net_uuid
is not None and
630 (req_link
is not None or req_mode
is not None)):
631 raise errors
.OpPrereqError("Not allowed to change link or mode of"
632 " a NIC that is connected to a network",
635 private
.params
= new_params
636 private
.filled
= new_filled_params
638 def _PreCheckDiskTemplate(self
, pnode_info
):
639 """CheckPrereq checks related to a new disk template."""
640 # Arguments are passed to avoid configuration lookups
641 pnode_uuid
= self
.instance
.primary_node
643 # TODO make sure heterogeneous disk types can be converted.
644 disk_template
= self
.cfg
.GetInstanceDiskTemplate(self
.instance
.uuid
)
645 if disk_template
== constants
.DT_MIXED
:
646 raise errors
.OpPrereqError(
647 "Conversion from mixed is not yet supported.")
649 inst_disks
= self
.cfg
.GetInstanceDisks(self
.instance
.uuid
)
650 if utils
.AnyDiskOfType(inst_disks
, constants
.DTS_NOT_CONVERTIBLE_FROM
):
651 raise errors
.OpPrereqError(
652 "Conversion from the '%s' disk template is not supported"
653 % self
.cfg
.GetInstanceDiskTemplate(self
.instance
.uuid
),
656 elif self
.op
.disk_template
in constants
.DTS_NOT_CONVERTIBLE_TO
:
657 raise errors
.OpPrereqError("Conversion to the '%s' disk template is"
658 " not supported" % self
.op
.disk_template
,
661 if (self
.op
.disk_template
!= constants
.DT_EXT
and
662 utils
.AllDiskOfType(inst_disks
, [self
.op
.disk_template
])):
663 raise errors
.OpPrereqError("Instance already has disk template %s" %
664 self
.op
.disk_template
, errors
.ECODE_INVAL
)
666 if not self
.cluster
.IsDiskTemplateEnabled(self
.op
.disk_template
):
667 enabled_dts
= utils
.CommaJoin(self
.cluster
.enabled_disk_templates
)
668 raise errors
.OpPrereqError("Disk template '%s' is not enabled for this"
669 " cluster (enabled templates: %s)" %
670 (self
.op
.disk_template
, enabled_dts
),
673 default_vg
= self
.cfg
.GetVGName()
674 if (not default_vg
and
675 self
.op
.disk_template
not in constants
.DTS_NOT_LVM
):
676 raise errors
.OpPrereqError("Disk template conversions to lvm-based"
677 " instances are not supported by the cluster",
680 CheckInstanceState(self
, self
.instance
, INSTANCE_DOWN
,
681 msg
="cannot change disk template")
683 # compute new disks' information
684 self
.disks_info
= ComputeDisksInfo(inst_disks
, self
.op
.disk_template
,
685 default_vg
, self
.op
.ext_params
)
687 # mirror node verification
688 if self
.op
.disk_template
in constants
.DTS_INT_MIRROR
:
689 if self
.op
.remote_node_uuid
== pnode_uuid
:
690 raise errors
.OpPrereqError("Given new secondary node %s is the same"
691 " as the primary node of the instance" %
692 self
.op
.remote_node
, errors
.ECODE_STATE
)
693 CheckNodeOnline(self
, self
.op
.remote_node_uuid
)
694 CheckNodeNotDrained(self
, self
.op
.remote_node_uuid
)
695 CheckNodeVmCapable(self
, self
.op
.remote_node_uuid
)
697 snode_info
= self
.cfg
.GetNodeInfo(self
.op
.remote_node_uuid
)
698 snode_group
= self
.cfg
.GetNodeGroup(snode_info
.group
)
699 ipolicy
= ganeti
.masterd
.instance
.CalculateGroupIPolicy(self
.cluster
,
701 CheckTargetNodeIPolicy(self
, ipolicy
, self
.instance
, snode_info
, self
.cfg
,
702 ignore
=self
.op
.ignore_ipolicy
)
703 if pnode_info
.group
!= snode_info
.group
:
704 self
.LogWarning("The primary and secondary nodes are in two"
705 " different node groups; the disk parameters"
706 " from the first disk's node group will be"
709 # check that the template is in the primary node group's allowed templates
710 pnode_group
= self
.cfg
.GetNodeGroup(pnode_info
.group
)
711 ipolicy
= ganeti
.masterd
.instance
.CalculateGroupIPolicy(self
.cluster
,
713 allowed_dts
= ipolicy
[constants
.IPOLICY_DTS
]
714 if self
.op
.disk_template
not in allowed_dts
:
715 raise errors
.OpPrereqError("Disk template '%s' in not allowed (allowed"
716 " templates: %s)" % (self
.op
.disk_template
,
717 utils
.CommaJoin(allowed_dts
)),
720 if not self
.op
.disk_template
in constants
.DTS_EXCL_STORAGE
:
721 # Make sure none of the nodes require exclusive storage
723 if self
.op
.disk_template
in constants
.DTS_INT_MIRROR
:
725 nodes
.append(snode_info
)
726 has_es
= lambda n
: IsExclusiveStorageEnabledNode(self
.cfg
, n
)
727 if compat
.any(map(has_es
, nodes
)):
728 errmsg
= ("Cannot convert disk template from %s to %s when exclusive"
729 " storage is enabled" % (
730 self
.cfg
.GetInstanceDiskTemplate(self
.instance
.uuid
),
731 self
.op
.disk_template
))
732 raise errors
.OpPrereqError(errmsg
, errors
.ECODE_STATE
)
734 # TODO remove setting the disk template after DiskSetParams exists.
735 # node capacity checks
736 if (self
.op
.disk_template
== constants
.DT_PLAIN
and
737 utils
.AllDiskOfType(inst_disks
, [constants
.DT_DRBD8
])):
738 # we ensure that no capacity checks will be made for conversions from
739 # the 'drbd' to the 'plain' disk template
741 elif (self
.op
.disk_template
== constants
.DT_DRBD8
and
742 utils
.AllDiskOfType(inst_disks
, [constants
.DT_PLAIN
])):
743 # for conversions from the 'plain' to the 'drbd' disk template, check
744 # only the remote node's capacity
745 req_sizes
= ComputeDiskSizePerVG(self
.op
.disk_template
, self
.disks_info
)
746 CheckNodesFreeDiskPerVG(self
, [self
.op
.remote_node_uuid
], req_sizes
)
747 elif self
.op
.disk_template
in constants
.DTS_LVM
:
748 # rest lvm-based capacity checks
749 node_uuids
= [pnode_uuid
]
750 if self
.op
.remote_node_uuid
:
751 node_uuids
.append(self
.op
.remote_node_uuid
)
752 req_sizes
= ComputeDiskSizePerVG(self
.op
.disk_template
, self
.disks_info
)
753 CheckNodesFreeDiskPerVG(self
, node_uuids
, req_sizes
)
754 elif self
.op
.disk_template
== constants
.DT_RBD
:
755 # CheckRADOSFreeSpace() is simply a placeholder
756 CheckRADOSFreeSpace()
757 elif self
.op
.disk_template
== constants
.DT_EXT
:
758 # FIXME: Capacity checks for extstorage template, if exists
761 # FIXME: Checks about other non lvm-based disk templates
764 def _PreCheckDisks(self
, ispec
):
765 """CheckPrereq checks related to disk changes.
768 @param ispec: instance specs to be updated with the new disks
771 self
.diskparams
= self
.cfg
.GetInstanceDiskParams(self
.instance
)
773 inst_nodes
= self
.cfg
.GetInstanceNodes(self
.instance
.uuid
)
774 excl_stor
= compat
.any(
775 rpc
.GetExclusiveStorageForNodes(self
.cfg
, inst_nodes
).values()
778 # Get the group access type
779 node_info
= self
.cfg
.GetNodeInfo(self
.instance
.primary_node
)
780 node_group
= self
.cfg
.GetNodeGroup(node_info
.group
)
781 group_disk_params
= self
.cfg
.GetGroupDiskParams(node_group
)
783 group_access_types
= dict(
784 (dt
, group_disk_params
[dt
].get(
785 constants
.RBD_ACCESS
, constants
.DISK_KERNELSPACE
))
786 for dt
in constants
.DISK_TEMPLATES
)
788 # Check disk modifications. This is done here and not in CheckArguments
789 # (as with NICs), because we need to know the instance's disk template
790 ver_fn
= lambda op
, par
: self
._VerifyDiskModification(op
, par
, excl_stor
,
792 # Don't enforce param types here in case it's an ext disk added. The check
793 # happens inside _VerifyDiskModification.
794 self
._CheckMods("disk", self
.op
.disks
, {}, ver_fn
)
796 self
.diskmod
= PrepareContainerMods(self
.op
.disks
, None)
798 def _PrepareDiskMod(_
, disk
, params
, __
):
799 disk
.name
= params
.get(constants
.IDISK_NAME
, None)
801 # Verify disk changes (operating on a copy)
802 inst_disks
= self
.cfg
.GetInstanceDisks(self
.instance
.uuid
)
803 disks
= copy
.deepcopy(inst_disks
)
804 ApplyContainerMods("disk", disks
, None, self
.diskmod
, None, None,
805 _PrepareDiskMod
, None, None)
806 utils
.ValidateDeviceNames("disk", disks
)
807 if len(disks
) > constants
.MAX_DISKS
:
808 raise errors
.OpPrereqError("Instance has too many disks (%d), cannot add"
809 " more" % constants
.MAX_DISKS
,
811 disk_sizes
= [disk
.size
for disk
in inst_disks
]
812 disk_sizes
.extend(params
["size"] for (op
, idx
, params
, private
) in
813 self
.diskmod
if op
== constants
.DDM_ADD
)
814 ispec
[constants
.ISPEC_DISK_COUNT
] = len(disk_sizes
)
815 ispec
[constants
.ISPEC_DISK_SIZE
] = disk_sizes
817 # either --online or --offline was passed
818 if self
.op
.offline
is not None:
820 msg
= "can't change to offline without being down first"
822 msg
= "can't change to online (down) without being offline first"
823 CheckInstanceState(self
, self
.instance
, INSTANCE_NOT_RUNNING
,
827 def _InstanceCommunicationDDM(cfg
, instance_communication
, instance
):
828 """Create a NIC mod that adds or removes the instance
829 communication NIC to a running instance.
831 The NICS are dynamically created using the Dynamic Device
832 Modification (DDM). This function produces a NIC modification
833 (mod) that inserts an additional NIC meant for instance
834 communication in or removes an existing instance communication NIC
835 from a running instance, using DDM.
837 @type cfg: L{config.ConfigWriter}
838 @param cfg: cluster configuration
840 @type instance_communication: boolean
841 @param instance_communication: whether instance communication is
844 @type instance: L{objects.Instance}
845 @param instance: instance to which the NIC mod will be applied to
847 @rtype: (L{constants.DDM_ADD}, -1, parameters) or
848 (L{constants.DDM_REMOVE}, -1, parameters) or
850 @return: DDM mod containing an action to add or remove the NIC, or
851 None if nothing needs to be done
854 nic_name
= ComputeInstanceCommunicationNIC(instance
.name
)
856 instance_communication_nic
= None
858 for nic
in instance
.nics
:
859 if nic
.name
== nic_name
:
860 instance_communication_nic
= nic
863 if instance_communication
and not instance_communication_nic
:
864 action
= constants
.DDM_ADD
865 params
= {constants
.INIC_NAME
: nic_name
,
866 constants
.INIC_MAC
: constants
.VALUE_GENERATE
,
867 constants
.INIC_IP
: constants
.NIC_IP_POOL
,
868 constants
.INIC_NETWORK
:
869 cfg
.GetInstanceCommunicationNetwork()}
870 elif not instance_communication
and instance_communication_nic
:
871 action
= constants
.DDM_REMOVE
877 if action
is not None:
878 return (action
, -1, params
)
882 def _GetInstanceInfo(self
, cluster_hvparams
):
883 pnode_uuid
= self
.instance
.primary_node
884 instance_info
= self
.rpc
.call_instance_info(
885 pnode_uuid
, self
.instance
.name
, self
.instance
.hypervisor
,
889 def _CheckHotplug(self
):
890 if self
.op
.hotplug
or self
.op
.hotplug_if_possible
:
891 result
= self
.rpc
.call_hotplug_supported(self
.instance
.primary_node
,
895 result
.Raise("Hotplug is not possible: %s" % result
.fail_msg
,
896 prereq
=True, ecode
=errors
.ECODE_STATE
)
898 self
.LogWarning(result
.fail_msg
)
899 self
.op
.hotplug
= False
900 self
.LogInfo("Modification will take place without hotplugging.")
902 self
.op
.hotplug
= True
904 def _PrepareNicCommunication(self
):
905 # add or remove NIC for instance communication
906 if self
.op
.instance_communication
is not None:
907 mod
= self
._InstanceCommunicationDDM(self
.cfg
,
908 self
.op
.instance_communication
,
911 self
.op
.nics
.append(mod
)
913 self
.nicmod
= PrepareContainerMods(self
.op
.nics
, InstNicModPrivate
)
915 def _ProcessHVParams(self
, node_uuids
):
917 hv_type
= self
.instance
.hypervisor
918 i_hvdict
= GetUpdatedParams(self
.instance
.hvparams
, self
.op
.hvparams
)
919 utils
.ForceDictType(i_hvdict
, constants
.HVS_PARAMETER_TYPES
)
920 hv_new
= self
.cluster
.SimpleFillHV(hv_type
, self
.instance
.os
, i_hvdict
)
923 hypervisor
.GetHypervisorClass(hv_type
).CheckParameterSyntax(hv_new
)
924 CheckHVParams(self
, node_uuids
, self
.instance
.hypervisor
, hv_new
)
925 self
.hv_proposed
= self
.hv_new
= hv_new
# the new actual values
926 self
.hv_inst
= i_hvdict
# the new dict (without defaults)
928 self
.hv_proposed
= self
.cluster
.SimpleFillHV(self
.instance
.hypervisor
,
930 self
.instance
.hvparams
)
931 self
.hv_new
= self
.hv_inst
= {}
933 def _ProcessBeParams(self
):
935 i_bedict
= GetUpdatedParams(self
.instance
.beparams
, self
.op
.beparams
,
937 objects
.UpgradeBeParams(i_bedict
)
938 utils
.ForceDictType(i_bedict
, constants
.BES_PARAMETER_TYPES
)
939 be_new
= self
.cluster
.SimpleFillBE(i_bedict
)
940 self
.be_proposed
= self
.be_new
= be_new
# the new actual values
941 self
.be_inst
= i_bedict
# the new dict (without defaults)
943 self
.be_new
= self
.be_inst
= {}
944 self
.be_proposed
= self
.cluster
.SimpleFillBE(self
.instance
.beparams
)
945 return self
.cluster
.FillBE(self
.instance
)
947 def _ValidateCpuParams(self
):
948 # CPU param validation -- checking every time a parameter is
949 # changed to cover all cases where either CPU mask or vcpus have
951 if (constants
.BE_VCPUS
in self
.be_proposed
and
952 constants
.HV_CPU_MASK
in self
.hv_proposed
):
954 utils
.ParseMultiCpuMask(self
.hv_proposed
[constants
.HV_CPU_MASK
])
955 # Verify mask is consistent with number of vCPUs. Can skip this
956 # test if only 1 entry in the CPU mask, which means same mask
957 # is applied to all vCPUs.
958 if (len(cpu_list
) > 1 and
959 len(cpu_list
) != self
.be_proposed
[constants
.BE_VCPUS
]):
960 raise errors
.OpPrereqError("Number of vCPUs [%d] does not match the"
962 (self
.be_proposed
[constants
.BE_VCPUS
],
963 self
.hv_proposed
[constants
.HV_CPU_MASK
]),
966 # Only perform this test if a new CPU mask is given
967 if constants
.HV_CPU_MASK
in self
.hv_new
and cpu_list
:
968 # Calculate the largest CPU number requested
969 max_requested_cpu
= max(map(max, cpu_list
))
970 # Check that all of the instance's nodes have enough physical CPUs to
971 # satisfy the requested CPU mask
972 hvspecs
= [(self
.instance
.hypervisor
,
973 self
.cfg
.GetClusterInfo()
974 .hvparams
[self
.instance
.hypervisor
])]
975 CheckNodesPhysicalCPUs(self
,
976 self
.cfg
.GetInstanceNodes(self
.instance
.uuid
),
977 max_requested_cpu
+ 1,
980 def _ProcessOsParams(self
, node_uuids
):
981 # osparams processing
982 instance_os
= (self
.op
.os_name
983 if self
.op
.os_name
and not self
.op
.force
984 else self
.instance
.os
)
986 if self
.op
.osparams
or self
.op
.osparams_private
:
987 public_parms
= self
.op
.osparams
or {}
988 private_parms
= self
.op
.osparams_private
or {}
989 dupe_keys
= utils
.GetRepeatedKeys(public_parms
, private_parms
)
992 raise errors
.OpPrereqError("OS parameters repeated multiple times: %s" %
993 utils
.CommaJoin(dupe_keys
))
995 self
.os_inst
= GetUpdatedParams(self
.instance
.osparams
,
997 self
.os_inst_private
= GetUpdatedParams(self
.instance
.osparams_private
,
1000 CheckOSParams(self
, True, node_uuids
, instance_os
,
1001 objects
.FillDict(self
.os_inst
,
1002 self
.os_inst_private
),
1003 self
.op
.force_variant
)
1007 self
.os_inst_private
= {}
1009 def _ProcessMem(self
, cluster_hvparams
, be_old
, pnode_uuid
):
1010 #TODO(dynmem): do the appropriate check involving MINMEM
1011 if (constants
.BE_MAXMEM
in self
.op
.beparams
and not self
.op
.force
and
1012 self
.be_new
[constants
.BE_MAXMEM
] > be_old
[constants
.BE_MAXMEM
]):
1013 mem_check_list
= [pnode_uuid
]
1014 if self
.be_new
[constants
.BE_AUTO_BALANCE
]:
1015 # either we changed auto_balance to yes or it was from before
1016 mem_check_list
.extend(
1017 self
.cfg
.GetInstanceSecondaryNodes(self
.instance
.uuid
))
1018 instance_info
= self
._GetInstanceInfo(cluster_hvparams
)
1019 hvspecs
= [(self
.instance
.hypervisor
,
1021 nodeinfo
= self
.rpc
.call_node_info(mem_check_list
, None,
1023 pninfo
= nodeinfo
[pnode_uuid
]
1024 msg
= pninfo
.fail_msg
1026 # Assume the primary node is unreachable and go ahead
1027 self
.warn
.append("Can't get info from primary node %s: %s" %
1028 (self
.cfg
.GetNodeName(pnode_uuid
), msg
))
1030 (_
, _
, (pnhvinfo
, )) = pninfo
.payload
1031 if not isinstance(pnhvinfo
.get("memory_free", None), int):
1032 self
.warn
.append("Node data from primary node %s doesn't contain"
1033 " free memory information" %
1034 self
.cfg
.GetNodeName(pnode_uuid
))
1035 elif instance_info
.fail_msg
:
1036 self
.warn
.append("Can't get instance runtime information: %s" %
1037 instance_info
.fail_msg
)
1039 if instance_info
.payload
:
1040 current_mem
= int(instance_info
.payload
["memory"])
1042 # Assume instance not running
1043 # (there is a slight race condition here, but it's not very
1044 # probable, and we have no other way to check)
1045 # TODO: Describe race condition
1047 #TODO(dynmem): do the appropriate check involving MINMEM
1048 miss_mem
= (self
.be_new
[constants
.BE_MAXMEM
] - current_mem
-
1049 pnhvinfo
["memory_free"])
1051 raise errors
.OpPrereqError("This change will prevent the instance"
1052 " from starting, due to %d MB of memory"
1053 " missing on its primary node" %
1054 miss_mem
, errors
.ECODE_NORES
)
1056 if self
.be_new
[constants
.BE_AUTO_BALANCE
]:
1058 self
.cfg
.GetInstanceSecondaryNodes(self
.instance
.uuid
)
1059 for node_uuid
, nres
in nodeinfo
.items():
1060 if node_uuid
not in secondary_nodes
:
1062 nres
.Raise("Can't get info from secondary node %s" %
1063 self
.cfg
.GetNodeName(node_uuid
), prereq
=True,
1064 ecode
=errors
.ECODE_STATE
)
1065 (_
, _
, (nhvinfo
, )) = nres
.payload
1066 if not isinstance(nhvinfo
.get("memory_free", None), int):
1067 raise errors
.OpPrereqError("Secondary node %s didn't return free"
1068 " memory information" %
1069 self
.cfg
.GetNodeName(node_uuid
),
1071 #TODO(dynmem): do the appropriate check involving MINMEM
1072 elif self
.be_new
[constants
.BE_MAXMEM
] > nhvinfo
["memory_free"]:
1073 raise errors
.OpPrereqError("This change will prevent the instance"
1074 " from failover to its secondary node"
1075 " %s, due to not enough memory" %
1076 self
.cfg
.GetNodeName(node_uuid
),
1079 if self
.op
.runtime_mem
:
1080 remote_info
= self
.rpc
.call_instance_info(
1081 self
.instance
.primary_node
, self
.instance
.name
,
1082 self
.instance
.hypervisor
,
1084 remote_info
.Raise("Error checking node %s" %
1085 self
.cfg
.GetNodeName(self
.instance
.primary_node
),
1087 if not remote_info
.payload
: # not running already
1088 raise errors
.OpPrereqError("Instance %s is not running" %
1089 self
.instance
.name
, errors
.ECODE_STATE
)
1091 current_memory
= remote_info
.payload
["memory"]
1092 if (not self
.op
.force
and
1093 (self
.op
.runtime_mem
> self
.be_proposed
[constants
.BE_MAXMEM
] or
1094 self
.op
.runtime_mem
< self
.be_proposed
[constants
.BE_MINMEM
])):
1095 raise errors
.OpPrereqError("Instance %s must have memory between %d"
1096 " and %d MB of memory unless --force is"
1098 (self
.instance
.name
,
1099 self
.be_proposed
[constants
.BE_MINMEM
],
1100 self
.be_proposed
[constants
.BE_MAXMEM
]),
1103 delta
= self
.op
.runtime_mem
- current_memory
1105 CheckNodeFreeMemory(
1106 self
, self
.instance
.primary_node
,
1107 "ballooning memory for instance %s" % self
.instance
.name
, delta
,
1108 self
.instance
.hypervisor
,
1109 self
.cfg
.GetClusterInfo().hvparams
[self
.instance
.hypervisor
])
1111 def CheckPrereq(self
):
1112 """Check prerequisites.
1114 This only checks the instance list against the existing names.
1117 assert self
.op
.instance_name
in self
.owned_locks(locking
.LEVEL_INSTANCE
)
1118 self
.instance
= self
.cfg
.GetInstanceInfo(self
.op
.instance_uuid
)
1119 self
.cluster
= self
.cfg
.GetClusterInfo()
1120 cluster_hvparams
= self
.cluster
.hvparams
[self
.instance
.hypervisor
]
1122 self
.op
.disks
= self
._LookupDiskMods()
1124 assert self
.instance
is not None, \
1125 "Cannot retrieve locked instance %s" % self
.op
.instance_name
1129 if (self
.op
.pnode_uuid
is not None and
1130 self
.op
.pnode_uuid
!= self
.instance
.primary_node
and
1132 instance_info
= self
._GetInstanceInfo(cluster_hvparams
)
1134 if instance_info
.fail_msg
:
1135 self
.warn
.append("Can't get instance runtime information: %s" %
1136 instance_info
.fail_msg
)
1137 elif instance_info
.payload
:
1138 raise errors
.OpPrereqError(
1139 "Instance is still running on %s" %
1140 self
.cfg
.GetNodeName(self
.instance
.primary_node
),
1142 pnode_uuid
= self
.instance
.primary_node
1143 assert pnode_uuid
in self
.owned_locks(locking
.LEVEL_NODE
)
1145 node_uuids
= list(self
.cfg
.GetInstanceNodes(self
.instance
.uuid
))
1146 pnode_info
= self
.cfg
.GetNodeInfo(pnode_uuid
)
1148 assert pnode_info
.group
in self
.owned_locks(locking
.LEVEL_NODEGROUP
)
1149 group_info
= self
.cfg
.GetNodeGroup(pnode_info
.group
)
1151 # dictionary with instance information after the modification
1154 self
._CheckHotplug()
1156 self
._PrepareNicCommunication()
1159 assert not (self
.op
.disk_template
and self
.op
.disks
), \
1160 "Can't modify disk template and apply disk changes at the same time"
1162 if self
.op
.disk_template
:
1163 self
._PreCheckDiskTemplate(pnode_info
)
1165 self
._PreCheckDisks(ispec
)
1167 self
._ProcessHVParams(node_uuids
)
1168 be_old
= self
._ProcessBeParams()
1170 self
._ValidateCpuParams()
1171 self
._ProcessOsParams(node_uuids
)
1172 self
._ProcessMem(cluster_hvparams
, be_old
, pnode_uuid
)
1174 # make self.cluster visible in the functions below
1175 cluster
= self
.cluster
1177 def _PrepareNicCreate(_
, params
, private
):
1178 self
._PrepareNicModification(params
, private
, None, None,
1179 {}, cluster
, pnode_uuid
)
1182 def _PrepareNicAttach(_
, __
, ___
):
1183 raise errors
.OpPrereqError("Attach operation is not supported for NICs",
1186 def _PrepareNicMod(_
, nic
, params
, private
):
1187 self
._PrepareNicModification(params
, private
, nic
.ip
, nic
.network
,
1188 nic
.nicparams
, cluster
, pnode_uuid
)
1191 def _PrepareNicRemove(_
, params
, __
):
1193 net
= params
.network
1194 if net
is not None and ip
is not None:
1195 self
.cfg
.ReleaseIp(net
, ip
, self
.proc
.GetECId())
1197 def _PrepareNicDetach(_
, __
, ___
):
1198 raise errors
.OpPrereqError("Detach operation is not supported for NICs",
1201 # Verify NIC changes (operating on copy)
1202 nics
= [nic
.Copy() for nic
in self
.instance
.nics
]
1203 ApplyContainerMods("NIC", nics
, None, self
.nicmod
, _PrepareNicCreate
,
1204 _PrepareNicAttach
, _PrepareNicMod
, _PrepareNicRemove
,
1206 if len(nics
) > constants
.MAX_NICS
:
1207 raise errors
.OpPrereqError("Instance has too many network interfaces"
1208 " (%d), cannot add more" % constants
.MAX_NICS
,
1211 # Pre-compute NIC changes (necessary to use result in hooks)
1212 self
._nic_chgdesc
= []
1214 # Operate on copies as this is still in prereq
1215 nics
= [nic
.Copy() for nic
in self
.instance
.nics
]
1216 ApplyContainerMods("NIC", nics
, self
._nic_chgdesc
, self
.nicmod
,
1217 self
._CreateNewNic
, None, self
._ApplyNicMods
,
1218 self
._RemoveNic
, None)
1219 # Verify that NIC names are unique and valid
1220 utils
.ValidateDeviceNames("NIC", nics
)
1221 self
._new_nics
= nics
1222 ispec
[constants
.ISPEC_NIC_COUNT
] = len(self
._new_nics
)
1224 self
._new_nics
= None
1225 ispec
[constants
.ISPEC_NIC_COUNT
] = len(self
.instance
.nics
)
1227 if not self
.op
.ignore_ipolicy
:
1228 ipolicy
= ganeti
.masterd
.instance
.CalculateGroupIPolicy(self
.cluster
,
1231 # Fill ispec with backend parameters
1232 ispec
[constants
.ISPEC_SPINDLE_USE
] = \
1233 self
.be_new
.get(constants
.BE_SPINDLE_USE
, None)
1234 ispec
[constants
.ISPEC_CPU_COUNT
] = self
.be_new
.get(constants
.BE_VCPUS
,
1237 # Copy ispec to verify parameters with min/max values separately
1238 if self
.op
.disk_template
:
1239 count
= ispec
[constants
.ISPEC_DISK_COUNT
]
1240 new_disk_types
= [self
.op
.disk_template
] * count
1242 old_disks
= self
.cfg
.GetInstanceDisks(self
.instance
.uuid
)
1243 add_disk_count
= ispec
[constants
.ISPEC_DISK_COUNT
] - len(old_disks
)
1244 dev_type
= self
.cfg
.GetInstanceDiskTemplate(self
.instance
.uuid
)
1245 if dev_type
== constants
.DT_DISKLESS
and add_disk_count
!= 0:
1246 raise errors
.ProgrammerError(
1247 "Conversion from diskless instance not possible and should have"
1250 new_disk_types
= ([d
.dev_type
for d
in old_disks
] +
1251 [dev_type
] * add_disk_count
)
1252 ispec_max
= ispec
.copy()
1253 ispec_max
[constants
.ISPEC_MEM_SIZE
] = \
1254 self
.be_new
.get(constants
.BE_MAXMEM
, None)
1255 res_max
= ComputeIPolicyInstanceSpecViolation(ipolicy
, ispec_max
,
1257 ispec_min
= ispec
.copy()
1258 ispec_min
[constants
.ISPEC_MEM_SIZE
] = \
1259 self
.be_new
.get(constants
.BE_MINMEM
, None)
1260 res_min
= ComputeIPolicyInstanceSpecViolation(ipolicy
, ispec_min
,
1263 if (res_max
or res_min
):
1264 # FIXME: Improve error message by including information about whether
1265 # the upper or lower limit of the parameter fails the ipolicy.
1266 msg
= ("Instance allocation to group %s (%s) violates policy: %s" %
1267 (group_info
, group_info
.name
,
1268 utils
.CommaJoin(set(res_max
+ res_min
))))
1269 raise errors
.OpPrereqError(msg
, errors
.ECODE_INVAL
)
1271 def _ConvertInstanceDisks(self
, feedback_fn
):
1272 """Converts the disks of an instance to another type.
1274 This function converts the disks of an instance. It supports
1275 conversions among all the available disk types except conversions
1276 between the LVM-based disk types, that use their separate code path.
1277 Also, this method does not support conversions that include the 'diskless'
1278 template and those targeting the 'blockdev' template.
1280 @type feedback_fn: callable
1281 @param feedback_fn: function used to send feedback back to the caller
1285 @raise errors.OpPrereqError: in case of failure
1288 template_info
= self
.op
.disk_template
1289 if self
.op
.disk_template
== constants
.DT_EXT
:
1290 template_info
= ":".join([self
.op
.disk_template
,
1291 self
.op
.ext_params
["provider"]])
1293 old_template
= self
.cfg
.GetInstanceDiskTemplate(self
.instance
.uuid
)
1294 feedback_fn("Converting disk template from '%s' to '%s'" %
1295 (old_template
, template_info
))
1297 assert not (old_template
in constants
.DTS_NOT_CONVERTIBLE_FROM
or
1298 self
.op
.disk_template
in constants
.DTS_NOT_CONVERTIBLE_TO
), \
1299 ("Unsupported disk template conversion from '%s' to '%s'" %
1300 (old_template
, self
.op
.disk_template
))
1302 pnode_uuid
= self
.instance
.primary_node
1304 if self
.op
.remote_node_uuid
:
1305 snode_uuid
= [self
.op
.remote_node_uuid
]
1307 old_disks
= self
.cfg
.GetInstanceDisks(self
.instance
.uuid
)
1309 feedback_fn("Generating new '%s' disk template..." % template_info
)
1310 file_storage_dir
= CalculateFileStorageDir(
1311 self
.op
.disk_template
, self
.cfg
, self
.instance
.name
,
1312 file_storage_dir
=self
.op
.file_storage_dir
)
1313 new_disks
= GenerateDiskTemplate(self
,
1314 self
.op
.disk_template
,
1320 self
.op
.file_driver
,
1325 # Create the new block devices for the instance.
1326 feedback_fn("Creating new empty disks of type '%s'..." % template_info
)
1328 CreateDisks(self
, self
.instance
, disk_template
=self
.op
.disk_template
,
1330 except errors
.OpExecError
:
1331 self
.LogWarning("Device creation failed")
1332 for disk
in new_disks
:
1333 self
.cfg
.ReleaseDRBDMinors(disk
.uuid
)
1336 # Transfer the data from the old to the newly created disks of the instance.
1337 feedback_fn("Populating the new empty disks of type '%s'..." %
1339 for idx
, (old
, new
) in enumerate(zip(old_disks
, new_disks
)):
1340 feedback_fn(" - copying data from disk %s (%s), size %s" %
1342 utils
.FormatUnit(new
.size
, "h")))
1343 if old
.dev_type
== constants
.DT_DRBD8
:
1344 old
= old
.children
[0]
1345 result
= self
.rpc
.call_blockdev_convert(pnode_uuid
, (old
, self
.instance
),
1346 (new
, self
.instance
))
1347 msg
= result
.fail_msg
1349 # A disk failed to copy. Abort the conversion operation and rollback
1350 # the modifications to the previous state. The instance will remain
1352 if self
.op
.disk_template
== constants
.DT_DRBD8
:
1353 new
= new
.children
[0]
1354 self
.Log(" - ERROR: Could not copy disk '%s' to '%s'" %
1355 (old
.logical_id
[1], new
.logical_id
[1]))
1357 self
.LogInfo("Some disks failed to copy")
1358 self
.LogInfo("The instance will not be affected, aborting operation")
1359 self
.LogInfo("Removing newly created disks of type '%s'..." %
1361 RemoveDisks(self
, self
.instance
, disks
=new_disks
)
1362 self
.LogInfo("Newly created disks removed successfully")
1364 for disk
in new_disks
:
1365 self
.cfg
.ReleaseDRBDMinors(disk
.uuid
)
1366 result
.Raise("Error while converting the instance's template")
1368 # In case of DRBD disk, return its port to the pool
1369 for disk
in old_disks
:
1370 if disk
.dev_type
== constants
.DT_DRBD8
:
1371 tcp_port
= disk
.logical_id
[2]
1372 self
.cfg
.AddTcpUdpPort(tcp_port
)
1374 # Remove old disks from the instance.
1375 feedback_fn("Detaching old disks (%s) from the instance and removing"
1376 " them from cluster config" % old_template
)
1377 for old_disk
in old_disks
:
1378 self
.cfg
.RemoveInstanceDisk(self
.instance
.uuid
, old_disk
.uuid
)
1380 # Attach the new disks to the instance.
1381 feedback_fn("Adding new disks (%s) to cluster config and attaching"
1382 " them to the instance" % template_info
)
1383 for (idx
, new_disk
) in enumerate(new_disks
):
1384 self
.cfg
.AddInstanceDisk(self
.instance
.uuid
, new_disk
, idx
=idx
)
1386 # Re-read the instance from the configuration.
1387 self
.instance
= self
.cfg
.GetInstanceInfo(self
.instance
.uuid
)
1389 # Release node locks while waiting for sync and disks removal.
1390 ReleaseLocks(self
, locking
.LEVEL_NODE
)
1392 disk_abort
= not WaitForSync(self
, self
.instance
,
1393 oneshot
=not self
.op
.wait_for_sync
)
1395 raise errors
.OpExecError("There are some degraded disks for"
1396 " this instance, please cleanup manually")
1398 feedback_fn("Removing old block devices of type '%s'..." % old_template
)
1399 RemoveDisks(self
, self
.instance
, disks
=old_disks
)
1401 # Node resource locks will be released by the caller.
1403 def _ConvertPlainToDrbd(self
, feedback_fn
):
1404 """Converts an instance from plain to drbd.
1407 feedback_fn("Converting disk template from 'plain' to 'drbd'")
1409 pnode_uuid
= self
.instance
.primary_node
1410 snode_uuid
= self
.op
.remote_node_uuid
1411 old_disks
= self
.cfg
.GetInstanceDisks(self
.instance
.uuid
)
1413 assert utils
.AnyDiskOfType(old_disks
, [constants
.DT_PLAIN
])
1415 new_disks
= GenerateDiskTemplate(self
, self
.op
.disk_template
,
1416 self
.instance
.uuid
, pnode_uuid
,
1417 [snode_uuid
], self
.disks_info
,
1419 feedback_fn
, self
.diskparams
)
1420 anno_disks
= rpc
.AnnotateDiskParams(new_disks
, self
.diskparams
)
1421 p_excl_stor
= IsExclusiveStorageEnabledNodeUuid(self
.cfg
, pnode_uuid
)
1422 s_excl_stor
= IsExclusiveStorageEnabledNodeUuid(self
.cfg
, snode_uuid
)
1423 info
= GetInstanceInfoText(self
.instance
)
1424 feedback_fn("Creating additional volumes...")
1425 # first, create the missing data and meta devices
1426 for disk
in anno_disks
:
1427 # unfortunately this is... not too nice
1428 CreateSingleBlockDev(self
, pnode_uuid
, self
.instance
, disk
.children
[1],
1429 info
, True, p_excl_stor
)
1430 for child
in disk
.children
:
1431 CreateSingleBlockDev(self
, snode_uuid
, self
.instance
, child
, info
, True,
1433 # at this stage, all new LVs have been created, we can rename the
1435 feedback_fn("Renaming original volumes...")
1436 rename_list
= [(o
, n
.children
[0].logical_id
)
1437 for (o
, n
) in zip(old_disks
, new_disks
)]
1438 result
= self
.rpc
.call_blockdev_rename(pnode_uuid
, rename_list
)
1439 result
.Raise("Failed to rename original LVs")
1441 feedback_fn("Initializing DRBD devices...")
1442 # all child devices are in place, we can now create the DRBD devices
1444 for disk
in anno_disks
:
1445 for (node_uuid
, excl_stor
) in [(pnode_uuid
, p_excl_stor
),
1446 (snode_uuid
, s_excl_stor
)]:
1447 f_create
= node_uuid
== pnode_uuid
1448 CreateSingleBlockDev(self
, node_uuid
, self
.instance
, disk
, info
,
1449 f_create
, excl_stor
)
1450 except errors
.GenericError
, e
:
1451 feedback_fn("Initializing of DRBD devices failed;"
1452 " renaming back original volumes...")
1453 rename_back_list
= [(n
.children
[0], o
.logical_id
)
1454 for (n
, o
) in zip(new_disks
, old_disks
)]
1455 result
= self
.rpc
.call_blockdev_rename(pnode_uuid
, rename_back_list
)
1456 result
.Raise("Failed to rename LVs back after error %s" % str(e
))
1459 # Remove the old disks from the instance
1460 for old_disk
in old_disks
:
1461 self
.cfg
.RemoveInstanceDisk(self
.instance
.uuid
, old_disk
.uuid
)
1463 # Attach the new disks to the instance
1464 for (idx
, new_disk
) in enumerate(new_disks
):
1465 self
.cfg
.AddInstanceDisk(self
.instance
.uuid
, new_disk
, idx
=idx
)
1467 # re-read the instance from the configuration
1468 self
.instance
= self
.cfg
.GetInstanceInfo(self
.instance
.uuid
)
1470 # Release node locks while waiting for sync
1471 ReleaseLocks(self
, locking
.LEVEL_NODE
)
1473 # disks are created, waiting for sync
1474 disk_abort
= not WaitForSync(self
, self
.instance
,
1475 oneshot
=not self
.op
.wait_for_sync
)
1477 raise errors
.OpExecError("There are some degraded disks for"
1478 " this instance, please cleanup manually")
1480 # Node resource locks will be released by caller
1482 def _ConvertDrbdToPlain(self
, feedback_fn
):
1483 """Converts an instance from drbd to plain.
1486 secondary_nodes
= self
.cfg
.GetInstanceSecondaryNodes(self
.instance
.uuid
)
1487 disks
= self
.cfg
.GetInstanceDisks(self
.instance
.uuid
)
1488 assert len(secondary_nodes
) == 1
1489 assert utils
.AnyDiskOfType(disks
, [constants
.DT_DRBD8
])
1491 feedback_fn("Converting disk template from 'drbd' to 'plain'")
1493 old_disks
= AnnotateDiskParams(self
.instance
, disks
, self
.cfg
)
1494 new_disks
= [d
.children
[0] for d
in disks
]
1496 # copy over size, mode and name and set the correct nodes
1497 for parent
, child
in zip(old_disks
, new_disks
):
1498 child
.size
= parent
.size
1499 child
.mode
= parent
.mode
1500 child
.name
= parent
.name
1501 child
.nodes
= [self
.instance
.primary_node
]
1503 # this is a DRBD disk, return its port to the pool
1504 for disk
in old_disks
:
1505 tcp_port
= disk
.logical_id
[2]
1506 self
.cfg
.AddTcpUdpPort(tcp_port
)
1508 # Remove the old disks from the instance
1509 for old_disk
in old_disks
:
1510 self
.cfg
.RemoveInstanceDisk(self
.instance
.uuid
, old_disk
.uuid
)
1512 # Attach the new disks to the instance
1513 for (idx
, new_disk
) in enumerate(new_disks
):
1514 self
.cfg
.AddInstanceDisk(self
.instance
.uuid
, new_disk
, idx
=idx
)
1516 # re-read the instance from the configuration
1517 self
.instance
= self
.cfg
.GetInstanceInfo(self
.instance
.uuid
)
1519 # Release locks in case removing disks takes a while
1520 ReleaseLocks(self
, locking
.LEVEL_NODE
)
1522 feedback_fn("Removing volumes on the secondary node...")
1523 RemoveDisks(self
, self
.instance
, disks
=old_disks
,
1524 target_node_uuid
=secondary_nodes
[0])
1526 feedback_fn("Removing unneeded volumes on the primary node...")
1528 for idx
, disk
in enumerate(old_disks
):
1529 meta_disks
.append(disk
.children
[1])
1530 RemoveDisks(self
, self
.instance
, disks
=meta_disks
)
1532 def _HotplugDevice(self
, action
, dev_type
, device
, extra
, seq
):
1533 self
.LogInfo("Trying to hotplug device...")
1535 result
= self
.rpc
.call_hotplug_device(self
.instance
.primary_node
,
1536 self
.instance
, action
, dev_type
,
1537 (device
, self
.instance
),
1540 self
.LogWarning("Could not hotplug device: %s" % result
.fail_msg
)
1541 self
.LogInfo("Continuing execution..")
1544 self
.LogInfo("Hotplug done.")
1548 def _FillFileDriver(self
):
1549 if not self
.op
.file_driver
:
1550 self
.op
.file_driver
= constants
.FD_DEFAULT
1551 elif self
.op
.file_driver
not in constants
.FILE_DRIVER
:
1552 raise errors
.OpPrereqError("Invalid file driver name '%s'" %
1553 self
.op
.file_driver
, errors
.ECODE_INVAL
)
1555 def _GenerateDiskTemplateWrapper(self
, idx
, disk_type
, params
):
1556 file_path
= CalculateFileStorageDir(
1557 disk_type
, self
.cfg
, self
.instance
.name
,
1558 file_storage_dir
=self
.op
.file_storage_dir
)
1560 self
._FillFileDriver()
1562 secondary_nodes
= self
.cfg
.GetInstanceSecondaryNodes(self
.instance
.uuid
)
1564 GenerateDiskTemplate(self
, disk_type
, self
.instance
.uuid
,
1565 self
.instance
.primary_node
, secondary_nodes
,
1566 [params
], file_path
, self
.op
.file_driver
, idx
,
1567 self
.Log
, self
.diskparams
)[0]
1569 def _CreateNewDisk(self
, idx
, params
, _
):
1570 """Creates a new disk.
1574 disk_template
= self
.cfg
.GetInstanceDiskTemplate(self
.instance
.uuid
)
1575 disk
= self
._GenerateDiskTemplateWrapper(idx
, disk_template
,
1577 new_disks
= CreateDisks(self
, self
.instance
, disks
=[disk
])
1578 self
.cfg
.AddInstanceDisk(self
.instance
.uuid
, disk
, idx
)
1580 # re-read the instance from the configuration
1581 self
.instance
= self
.cfg
.GetInstanceInfo(self
.instance
.uuid
)
1583 if self
.cluster
.prealloc_wipe_disks
:
1585 WipeOrCleanupDisks(self
, self
.instance
,
1586 disks
=[(idx
, disk
, 0)],
1591 "add:size=%s,mode=%s" % (disk
.size
, disk
.mode
)),
1594 result
= self
.rpc
.call_blockdev_assemble(self
.instance
.primary_node
,
1595 (disk
, self
.instance
),
1596 self
.instance
, True, idx
)
1598 changes
.append(("disk/%d" % idx
, "assemble:failed"))
1599 self
.LogWarning("Can't assemble newly created disk %d: %s",
1600 idx
, result
.fail_msg
)
1602 _
, link_name
, uri
= result
.payload
1603 msg
= self
._HotplugDevice(constants
.HOTPLUG_ACTION_ADD
,
1604 constants
.HOTPLUG_TARGET_DISK
,
1605 disk
, (link_name
, uri
), idx
)
1606 changes
.append(("disk/%d" % idx
, msg
))
1608 return (disk
, changes
)
1610 def _PostAddDisk(self
, _
, disk
):
1611 if not WaitForSync(self
, self
.instance
, disks
=[disk
],
1612 oneshot
=not self
.op
.wait_for_sync
):
1613 raise errors
.OpExecError("Failed to sync disks of %s" %
1616 # the disk is active at this point, so deactivate it if the instance disks
1617 # are supposed to be inactive
1618 if not self
.instance
.disks_active
:
1619 ShutdownInstanceDisks(self
, self
.instance
, disks
=[disk
])
1621 def _AttachDisk(self
, idx
, params
, _
):
1622 """Attaches an existing disk to an instance.
1625 uuid
= params
.get("uuid", None)
1626 name
= params
.get(constants
.IDISK_NAME
, None)
1628 disk
= self
.GenericGetDiskInfo(uuid
, name
)
1630 # Rename disk before attaching (if disk is filebased)
1631 if disk
.dev_type
in (constants
.DTS_INSTANCE_DEPENDENT_PATH
):
1632 # Add disk size/mode, else GenerateDiskTemplate will not work.
1633 params
[constants
.IDISK_SIZE
] = disk
.size
1634 params
[constants
.IDISK_MODE
] = str(disk
.mode
)
1635 dummy_disk
= self
._GenerateDiskTemplateWrapper(idx
, disk
.dev_type
, params
)
1636 new_logical_id
= dummy_disk
.logical_id
1637 result
= self
.rpc
.call_blockdev_rename(self
.instance
.primary_node
,
1638 [(disk
, new_logical_id
)])
1639 result
.Raise("Failed before attach")
1640 self
.cfg
.SetDiskLogicalID(disk
.uuid
, new_logical_id
)
1641 disk
.logical_id
= new_logical_id
1643 # Attach disk to instance
1644 self
.cfg
.AttachInstanceDisk(self
.instance
.uuid
, disk
.uuid
, idx
)
1646 # re-read the instance from the configuration
1647 self
.instance
= self
.cfg
.GetInstanceInfo(self
.instance
.uuid
)
1651 "attach:size=%s,mode=%s" % (disk
.size
, disk
.mode
)),
1654 disks_ok
, _
, payloads
= AssembleInstanceDisks(self
, self
.instance
,
1657 changes
.append(("disk/%d" % idx
, "assemble:failed"))
1658 return disk
, changes
1661 _
, link_name
, uri
= payloads
[0]
1662 msg
= self
._HotplugDevice(constants
.HOTPLUG_ACTION_ADD
,
1663 constants
.HOTPLUG_TARGET_DISK
,
1664 disk
, (link_name
, uri
), idx
)
1665 changes
.append(("disk/%d" % idx
, msg
))
1667 return (disk
, changes
)
1669 def _ModifyDisk(self
, idx
, disk
, params
, _
):
1674 if constants
.IDISK_MODE
in params
:
1675 disk
.mode
= params
.get(constants
.IDISK_MODE
)
1676 changes
.append(("disk.mode/%d" % idx
, disk
.mode
))
1678 if constants
.IDISK_NAME
in params
:
1679 disk
.name
= params
.get(constants
.IDISK_NAME
)
1680 changes
.append(("disk.name/%d" % idx
, disk
.name
))
1682 # Modify arbitrary params in case instance template is ext
1684 for key
, value
in params
.iteritems():
1685 if (key
not in constants
.MODIFIABLE_IDISK_PARAMS
and
1686 disk
.dev_type
== constants
.DT_EXT
):
1687 # stolen from GetUpdatedParams: default means reset/delete
1688 if value
.lower() == constants
.VALUE_DEFAULT
:
1690 del disk
.params
[key
]
1694 disk
.params
[key
] = value
1695 changes
.append(("disk.params:%s/%d" % (key
, idx
), value
))
1697 # Update disk object
1698 self
.cfg
.Update(disk
, self
.feedback_fn
)
1702 def _RemoveDisk(self
, idx
, root
, _
):
1708 hotmsg
= self
._HotplugDevice(constants
.HOTPLUG_ACTION_REMOVE
,
1709 constants
.HOTPLUG_TARGET_DISK
,
1711 ShutdownInstanceDisks(self
, self
.instance
, [root
])
1713 RemoveDisks(self
, self
.instance
, disks
=[root
])
1715 # if this is a DRBD disk, return its port to the pool
1716 if root
.dev_type
in constants
.DTS_DRBD
:
1717 self
.cfg
.AddTcpUdpPort(root
.logical_id
[2])
1719 # Remove disk from config
1720 self
.cfg
.RemoveInstanceDisk(self
.instance
.uuid
, root
.uuid
)
1722 # re-read the instance from the configuration
1723 self
.instance
= self
.cfg
.GetInstanceInfo(self
.instance
.uuid
)
  def _DetachDisk(self, idx, root, _):
    """Detaches a disk from an instance.

    Unlike L{_RemoveDisk}, the disk's backing storage is kept; the disk is
    only unlinked from the instance in the configuration.

    @type idx: int
    @param idx: index of the disk in the instance's disk container
    @type root: L{objects.Disk}
    @param root: the disk object to detach
    @rtype: string
    @return: hotplug feedback message (empty when no hotplug was requested)

    """
    hotmsg = ""
    if self.op.hotplug:
      hotmsg = self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
                                   constants.HOTPLUG_TARGET_DISK,
                                   root, None, idx)

    # Always shutdown the disk before detaching.
    ShutdownInstanceDisks(self, self.instance, [root])

    # Rename detached disk.
    #
    # Transform logical_id from:
    # <file_storage_dir>/<instance_name>/<disk_name>
    # to
    # <file_storage_dir>/<disk_name>
    if root.dev_type in (constants.DT_FILE, constants.DT_SHARED_FILE):
      file_driver = root.logical_id[0]
      instance_path, disk_name = os.path.split(root.logical_id[1])
      new_path = os.path.join(os.path.dirname(instance_path), disk_name)
      new_logical_id = (file_driver, new_path)
      result = self.rpc.call_blockdev_rename(self.instance.primary_node,
                                             [(root, new_logical_id)])
      result.Raise("Failed before detach")
      # Update logical_id in the configuration to match the renamed file.
      self.cfg.SetDiskLogicalID(root.uuid, new_logical_id)

    # Remove disk from config
    self.cfg.DetachInstanceDisk(self.instance.uuid, root.uuid)

    # re-read the instance from the configuration
    self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)

    return hotmsg
  def _CreateNewNic(self, idx, params, private):
    """Creates data structure for a new network interface.

    @type idx: int
    @param idx: index at which the new NIC is being added
    @type params: dict
    @param params: NIC parameters from the opcode (mac, ip, network, name)
    @param private: per-modification private data; C{private.filled} holds
      the filled nicparams dict
    @return: tuple of (new L{objects.NIC}, list of change descriptions)

    """
    mac = params[constants.INIC_MAC]
    ip = params.get(constants.INIC_IP, None)
    net = params.get(constants.INIC_NETWORK, None)
    name = params.get(constants.INIC_NAME, None)
    # Resolve the (possibly None) network name to its UUID for storage.
    net_uuid = self.cfg.LookupNetwork(net)
    #TODO: not private.filled?? can a nic have no nicparams??
    nicparams = private.filled
    nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
                       nicparams=nicparams)
    nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())

    changes = [
      ("nic.%d" % idx,
       "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
       (mac, ip, private.filled[constants.NIC_MODE],
       private.filled[constants.NIC_LINK], net)),
      ]

    if self.op.hotplug:
      msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
                                constants.HOTPLUG_TARGET_NIC,
                                nobj, None, idx)
      changes.append(("nic.%d" % idx, msg))

    return (nobj, changes)
  def _ApplyNicMods(self, idx, nic, params, private):
    """Modifies a network interface.

    Mutates C{nic} in place (mac/ip/name, network, nicparams) and returns
    a list of change descriptions for feedback.

    @type idx: int
    @param idx: index of the NIC being modified
    @type nic: L{objects.NIC}
    @param nic: the NIC object to modify
    @type params: dict
    @param params: requested NIC parameter changes
    @param private: per-modification private data; C{private.filled} holds
      the filled nicparams dict
    @rtype: list
    @return: list of (description, value) change tuples

    """
    changes = []

    # Simple scalar attributes are copied over verbatim.
    for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
      if key in params:
        changes.append(("nic.%s/%d" % (key, idx), params[key]))
        setattr(nic, key, params[key])

    new_net = params.get(constants.INIC_NETWORK, nic.network)
    new_net_uuid = self.cfg.LookupNetwork(new_net)
    if new_net_uuid != nic.network:
      changes.append(("nic.network/%d" % idx, new_net))
      # The NIC stores the network's UUID, not its name.
      nic.network = new_net_uuid

    if private.filled:
      nic.nicparams = private.filled

      # Report every (filled) nicparam as changed.
      for (key, val) in nic.nicparams.items():
        changes.append(("nic.%s/%d" % (key, idx), val))

    if self.op.hotplug:
      msg = self._HotplugDevice(constants.HOTPLUG_ACTION_MODIFY,
                                constants.HOTPLUG_TARGET_NIC,
                                nic, None, idx)
      changes.append(("nic/%d" % idx, msg))

    return changes
  def _RemoveNic(self, idx, nic, _):
    """Hot-unplugs a NIC that is being removed.

    @type idx: int
    @param idx: index of the NIC being removed
    @type nic: L{objects.NIC}
    @param nic: the NIC object being removed
    @return: the hotplug feedback message, or implicitly C{None} when
      hotplug was not requested

    """
    if self.op.hotplug:
      return self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
                                 constants.HOTPLUG_TARGET_NIC,
                                 nic, None, idx)
1832 def Exec(self
, feedback_fn
):
1833 """Modifies an instance.
1835 All parameters take effect only at the next restart of the instance.
1838 self
.feedback_fn
= feedback_fn
1839 # Process here the warnings from CheckPrereq, as we don't have a
1840 # feedback_fn there.
1841 # TODO: Replace with self.LogWarning
1842 for warn
in self
.warn
:
1843 feedback_fn("WARNING: %s" % warn
)
1845 assert ((self
.op
.disk_template
is None) ^
1846 bool(self
.owned_locks(locking
.LEVEL_NODE_RES
))), \
1847 "Not owning any node resource locks"
1852 if self
.op
.pnode_uuid
:
1853 self
.instance
.primary_node
= self
.op
.pnode_uuid
1856 if self
.op
.runtime_mem
:
1857 rpcres
= self
.rpc
.call_instance_balloon_memory(self
.instance
.primary_node
,
1859 self
.op
.runtime_mem
)
1860 rpcres
.Raise("Cannot modify instance runtime memory")
1861 result
.append(("runtime_memory", self
.op
.runtime_mem
))
1863 # Apply disk changes
1864 inst_disks
= self
.cfg
.GetInstanceDisks(self
.instance
.uuid
)
1865 ApplyContainerMods("disk", inst_disks
, result
, self
.diskmod
,
1866 self
._CreateNewDisk
, self
._AttachDisk
, self
._ModifyDisk
,
1867 self
._RemoveDisk
, self
._DetachDisk
,
1868 post_add_fn
=self
._PostAddDisk
)
1870 if self
.op
.disk_template
:
1872 check_nodes
= set(self
.cfg
.GetInstanceNodes(self
.instance
.uuid
))
1873 if self
.op
.remote_node_uuid
:
1874 check_nodes
.add(self
.op
.remote_node_uuid
)
1875 for level
in [locking
.LEVEL_NODE
, locking
.LEVEL_NODE_RES
]:
1876 owned
= self
.owned_locks(level
)
1877 assert not (check_nodes
- owned
), \
1878 ("Not owning the correct locks, owning %r, expected at least %r" %
1879 (owned
, check_nodes
))
1881 r_shut
= ShutdownInstanceDisks(self
, self
.instance
)
1883 raise errors
.OpExecError("Cannot shutdown instance disks, unable to"
1884 " proceed with disk template conversion")
1885 #TODO make heterogeneous conversions work
1886 mode
= (self
.cfg
.GetInstanceDiskTemplate(self
.instance
.uuid
),
1887 self
.op
.disk_template
)
1889 if mode
in self
._DISK_CONVERSIONS
:
1890 self
._DISK_CONVERSIONS
[mode
](self
, feedback_fn
)
1892 self
._ConvertInstanceDisks(feedback_fn
)
1894 for disk
in inst_disks
:
1895 self
.cfg
.ReleaseDRBDMinors(disk
.uuid
)
1897 result
.append(("disk_template", self
.op
.disk_template
))
1899 disk_info
= self
.cfg
.GetInstanceDisks(self
.instance
.uuid
)
1900 assert utils
.AllDiskOfType(disk_info
, [self
.op
.disk_template
]), \
1901 ("Expected disk template '%s', found '%s'" %
1902 (self
.op
.disk_template
,
1903 self
.cfg
.GetInstanceDiskTemplate(self
.instance
.uuid
)))
1905 # Release node and resource locks if there are any (they might already have
1906 # been released during disk conversion)
1907 ReleaseLocks(self
, locking
.LEVEL_NODE
)
1908 ReleaseLocks(self
, locking
.LEVEL_NODE_RES
)
1911 if self
._new_nics
is not None:
1912 self
.instance
.nics
= self
._new_nics
1913 result
.extend(self
._nic_chgdesc
)
1916 if self
.op
.hvparams
:
1917 self
.instance
.hvparams
= self
.hv_inst
1918 for key
, val
in self
.op
.hvparams
.iteritems():
1919 result
.append(("hv/%s" % key
, val
))
1922 if self
.op
.beparams
:
1923 self
.instance
.beparams
= self
.be_inst
1924 for key
, val
in self
.op
.beparams
.iteritems():
1925 result
.append(("be/%s" % key
, val
))
1929 self
.instance
.os
= self
.op
.os_name
1932 if self
.op
.osparams
:
1933 self
.instance
.osparams
= self
.os_inst
1934 for key
, val
in self
.op
.osparams
.iteritems():
1935 result
.append(("os/%s" % key
, val
))
1937 if self
.op
.osparams_private
:
1938 self
.instance
.osparams_private
= self
.os_inst_private
1939 for key
, val
in self
.op
.osparams_private
.iteritems():
1940 # Show the Private(...) blurb.
1941 result
.append(("os_private/%s" % key
, repr(val
)))
1943 self
.cfg
.Update(self
.instance
, feedback_fn
, self
.proc
.GetECId())
1945 if self
.op
.offline
is None:
1948 elif self
.op
.offline
:
1949 # Mark instance as offline
1950 self
.instance
= self
.cfg
.MarkInstanceOffline(self
.instance
.uuid
)
1951 result
.append(("admin_state", constants
.ADMINST_OFFLINE
))
1953 # Mark instance as online, but stopped
1954 self
.instance
= self
.cfg
.MarkInstanceDown(self
.instance
.uuid
)
1955 result
.append(("admin_state", constants
.ADMINST_DOWN
))
1957 UpdateMetadata(feedback_fn
, self
.rpc
, self
.instance
)
1959 assert not (self
.owned_locks(locking
.LEVEL_NODE_RES
) or
1960 self
.owned_locks(locking
.LEVEL_NODE
)), \
1961 "All node locks should have been released by now"
1965 _DISK_CONVERSIONS
= {
1966 (constants
.DT_PLAIN
, constants
.DT_DRBD8
): _ConvertPlainToDrbd
,
1967 (constants
.DT_DRBD8
, constants
.DT_PLAIN
): _ConvertDrbdToPlain
,