#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


"""Logical units dealing with instances."""

import OpenSSL
import copy
import logging
import os

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import hypervisor
from ganeti import locking
from ganeti.masterd import iallocator
from ganeti import masterd
from ganeti import netutils
from ganeti import objects
from ganeti import pathutils
from ganeti import serializer
import ganeti.rpc.node as rpc
from ganeti import utils
from ganeti.utils import retry

from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs

from ganeti.cmdlib.common import INSTANCE_DOWN, \
  INSTANCE_NOT_RUNNING, CheckNodeOnline, \
  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, CheckOSImage, \
  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceUuidAndName, \
  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeUuidAndName, \
  CheckDiskTemplateEnabled, IsValidDiskAccessModeCombination, \
  DetermineImageSize, IsInstanceRunning
from ganeti.cmdlib.instance_storage import CreateDisks, \
  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, ImageDisks, \
  WaitForSync, IsExclusiveStorageEnabledNodeUuid, CreateSingleBlockDev, \
  ComputeDisks, ComputeDisksInfo, CheckRADOSFreeSpace, ComputeDiskSizePerVG, \
  GenerateDiskTemplate, StartInstanceDisks, ShutdownInstanceDisks, \
  AssembleInstanceDisks, CheckSpindlesExclusiveStorage, TemporaryDisk, \
  CalculateFileStorageDir
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
  CheckInstanceBridgesExist, CheckNicsBridgesExist, UpdateMetadata, \
  CheckCompressionTool, CheckInstanceExistence
import ganeti.masterd.instance

#: Type description for changes as returned by L{_ApplyContainerMods}'s
#: callbacks
_TApplyContModsCbChanges = \
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
    ht.TNonEmptyString,
    ht.TAny,
    ])))

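# Illustrative sketch (not part of upstream): a value matching the type
# above is either None or a list of (name, payload) pairs, e.g.
#
#   changes = [
#     ("add", nic_object),
#     ("modify", ("disk/0", {"size": 1024})),
#   ]
#
# The exact strings and payloads here are hypothetical; the type only
# enforces a non-empty string plus an arbitrary second item.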

def _CheckHostnameSane(lu, name):
  """Ensures that a given hostname resolves to a 'sane' name.

  The given name is required to be a prefix of the resolved hostname,
  to prevent accidental mismatches.

  @param lu: the logical unit on behalf of which we're checking
  @param name: the name we should resolve and check
  @return: the resolved hostname object

  """
  hostname = netutils.GetHostname(name=name)
  if hostname.name != name:
    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
  if not utils.MatchNameComponent(name, [hostname.name]):
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
                                " same as given hostname '%s'") %
                               (hostname.name, name), errors.ECODE_INVAL)
  return hostname

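# Illustrative usage of _CheckHostnameSane (hedged; names and domain are
# hypothetical and assume a resolver that expands "inst1" to
# "inst1.example.com"):
#
#   hostname = _CheckHostnameSane(lu, "inst1")
#   # hostname.name == "inst1.example.com"; a given name that is not a
#   # component prefix of the resolved one raises OpPrereqError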

def _CheckOpportunisticLocking(op):
  """Generate error if opportunistic locking is not possible.

  """
  if op.opportunistic_locking and not op.iallocator:
    raise errors.OpPrereqError("Opportunistic locking is only available in"
                               " combination with an instance allocator",
                               errors.ECODE_INVAL)


def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_name_whitelist):
  """Wrapper around IAReqInstanceAlloc.

  @param op: The instance opcode
  @param disks: The computed disks
  @param nics: The computed nics
  @param beparams: The full filled beparams
  @param node_name_whitelist: List of nodes which should appear as online to the
    allocator (unless the node is already marked offline)

  @returns: A filled L{iallocator.IAReqInstanceAlloc}

  """
  spindle_use = beparams[constants.BE_SPINDLE_USE]
  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
                                       disk_template=op.disk_template,
                                       group_name=op.group_name,
                                       tags=op.tags,
                                       os=op.os_type,
                                       vcpus=beparams[constants.BE_VCPUS],
                                       memory=beparams[constants.BE_MAXMEM],
                                       spindle_use=spindle_use,
                                       disks=disks,
                                       nics=[n.ToDict() for n in nics],
                                       hypervisor=op.hypervisor,
                                       node_whitelist=node_name_whitelist)

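# Minimal sketch of how the wrapper is used (values hypothetical; see
# iallocator.IAReqInstanceAlloc for the authoritative fields):
#
#   req = _CreateInstanceAllocRequest(op, disks, nics, be_full, None)
#   # the request carries memory from beparams[BE_MAXMEM] and vcpus from
#   # beparams[BE_VCPUS]; req.RequiredNodes() is 2 for mirrored templates
#   # such as DRBD and 1 otherwise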

def _ComputeFullBeParams(op, cluster):
  """Computes the full beparams.

  @param op: The instance opcode
  @param cluster: The cluster config object

  @return: The fully filled beparams

  """
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
  for param, value in op.beparams.iteritems():
    if value == constants.VALUE_AUTO:
      op.beparams[param] = default_beparams[param]
  objects.UpgradeBeParams(op.beparams)
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
  return cluster.SimpleFillBE(op.beparams)

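# Sketch of the "auto" substitution above (values hypothetical): with
# cluster defaults {maxmem: 1024, minmem: 1024, vcpus: 1} and
# op.beparams == {"vcpus": "auto", "maxmem": 2048}, the "auto" entry is
# first replaced by the cluster default (vcpus -> 1), then SimpleFillBE
# merges the remaining defaults, yielding
# {maxmem: 2048, minmem: 1024, vcpus: 1, ...}.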

def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
  """Computes the nics.

  @param op: The instance opcode
  @param cluster: Cluster configuration object
  @param default_ip: The default ip to assign
  @param cfg: An instance of the configuration object
  @param ec_id: Execution context ID

  @returns: The built up nics

  """
  nics = []
  for nic in op.nics:
    nic_mode_req = nic.get(constants.INIC_MODE, None)
    nic_mode = nic_mode_req
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]

    net = nic.get(constants.INIC_NETWORK, None)
    link = nic.get(constants.NIC_LINK, None)
    ip = nic.get(constants.INIC_IP, None)
    vlan = nic.get(constants.INIC_VLAN, None)

    if net is None or net.lower() == constants.VALUE_NONE:
      net = None
    else:
      if nic_mode_req is not None or link is not None:
        raise errors.OpPrereqError("If network is given, no mode or link"
                                   " is allowed to be passed",
                                   errors.ECODE_INVAL)

    # ip validity checks
    if ip is None or ip.lower() == constants.VALUE_NONE:
      nic_ip = None
    elif ip.lower() == constants.VALUE_AUTO:
      if not op.name_check:
        raise errors.OpPrereqError("IP address set to auto but name checks"
                                   " have been skipped",
                                   errors.ECODE_INVAL)
      nic_ip = default_ip
    else:
      # We defer pool operations until later, so that the iallocator has
      # filled in the instance's node(s)
      if ip.lower() == constants.NIC_IP_POOL:
        if net is None:
          raise errors.OpPrereqError("if ip=pool, parameter network"
                                     " must be passed too",
                                     errors.ECODE_INVAL)

      elif not netutils.IPAddress.IsValid(ip):
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                   errors.ECODE_INVAL)

      nic_ip = ip

    # TODO: check the ip address for uniqueness
    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip and not net:
      raise errors.OpPrereqError("Routed nic mode requires an ip address"
                                 " if not attached to a network",
                                 errors.ECODE_INVAL)

    # MAC address verification
    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
      mac = utils.NormalizeAndValidateMac(mac)

      try:
        # TODO: We need to factor this out
        cfg.ReserveMAC(mac, ec_id)
      except errors.ReservationError:
        raise errors.OpPrereqError("MAC address %s already in use"
                                   " in cluster" % mac,
                                   errors.ECODE_NOTUNIQUE)

    # Build nic parameters
    nicparams = {}
    if nic_mode_req:
      nicparams[constants.NIC_MODE] = nic_mode
    if link:
      nicparams[constants.NIC_LINK] = link
    if vlan:
      nicparams[constants.NIC_VLAN] = vlan

    check_params = cluster.SimpleFillNIC(nicparams)
    objects.NIC.CheckParameterSyntax(check_params)
    net_uuid = cfg.LookupNetwork(net)
    name = nic.get(constants.INIC_NAME, None)
    if name is not None and name.lower() == constants.VALUE_NONE:
      name = None
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
                          network=net_uuid, nicparams=nicparams)
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
    nics.append(nic_obj)

  return nics

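# Illustrative input/output for _ComputeNics (values hypothetical): an
# opcode NIC entry of
#
#   {"ip": "pool", "network": "mynet"}
#
# becomes an objects.NIC with a generated UUID, a mac of "auto" (resolved
# later in CheckPrereq), the ip left as "pool" until node placement is
# known, and nicparams filled from the cluster defaults; passing "mode" or
# "link" together with "network" raises OpPrereqError.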

def _CheckForConflictingIp(lu, ip, node_uuid):
  """In case of conflicting IP address raise error.

  @type ip: string
  @param ip: IP address
  @type node_uuid: string
  @param node_uuid: node UUID

  """
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node_uuid)
  if conf_net is not None:
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
                                " network %s, but the target NIC does not." %
                                (ip, conf_net)),
                               errors.ECODE_STATE)

  return (None, None)


def _ComputeIPolicyInstanceSpecViolation(
    ipolicy, instance_spec, disk_template,
    _compute_fn=ComputeIPolicySpecViolation):
  """Compute whether the instance spec meets the specs of the ipolicy.

  @type ipolicy: dict
  @param ipolicy: The ipolicy to verify against
  @type instance_spec: dict
  @param instance_spec: The instance spec to verify
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ComputeIPolicySpecViolation}

  """
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)

  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
                     disk_sizes, spindle_use, disk_template)


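# Illustrative call of _ComputeIPolicyInstanceSpecViolation (spec values
# hypothetical):
#
#   spec = {constants.ISPEC_MEM_SIZE: 2048,
#           constants.ISPEC_CPU_COUNT: 2,
#           constants.ISPEC_DISK_COUNT: 1,
#           constants.ISPEC_DISK_SIZE: [10240],
#           constants.ISPEC_NIC_COUNT: 1}
#   violations = _ComputeIPolicyInstanceSpecViolation(ipolicy, spec, "drbd")
#
# The result is a (possibly empty) list of human-readable violation
# descriptions, as produced by ComputeIPolicySpecViolation.
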
def _ComputeInstanceCommunicationNIC(instance_name):
  """Compute the name of the instance NIC used by instance
  communication.

  With instance communication, a new NIC is added to the instance.
  This NIC has a special name that identifies it as being part of
  instance communication, and not just a normal NIC. This function
  generates the name of the NIC based on a prefix and the instance
  name.

  @type instance_name: string
  @param instance_name: name of the instance the NIC belongs to

  @rtype: string
  @return: name of the NIC

  """
  return constants.INSTANCE_COMMUNICATION_NIC_PREFIX + instance_name


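# Example for _ComputeInstanceCommunicationNIC: with an instance named
# "inst1.example.com" (hypothetical), the returned NIC name is simply
# INSTANCE_COMMUNICATION_NIC_PREFIX + "inst1.example.com".
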
class LUInstanceCreate(LogicalUnit):
  """Create an instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def _CheckDiskTemplateValid(self):
    """Checks validity of disk template.

    """
    cluster = self.cfg.GetClusterInfo()
    if self.op.disk_template is None:
      # FIXME: It would be better to take the default disk template from the
      # ipolicy, but for the ipolicy we need the primary node, which we get from
      # the iallocator, which wants the disk template as input. To solve this
      # chicken-and-egg problem, it should be possible to specify just a node
      # group from the iallocator and take the ipolicy from that.
      self.op.disk_template = cluster.enabled_disk_templates[0]
    CheckDiskTemplateEnabled(cluster, self.op.disk_template)

  def _CheckDiskArguments(self):
    """Checks validity of disk-related arguments.

    """
    # check that disks' names are unique and valid
    utils.ValidateDeviceNames("disk", self.op.disks)

    self._CheckDiskTemplateValid()

    # check disks' parameter names and consistent adopt/no-adopt strategy
    has_adopt = has_no_adopt = False
    for disk in self.op.disks:
      if self.op.disk_template != constants.DT_EXT:
        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
      if constants.IDISK_ADOPT in disk:
        has_adopt = True
      else:
        has_no_adopt = True
    if has_adopt and has_no_adopt:
      raise errors.OpPrereqError("Either all disks are adopted or none is",
                                 errors.ECODE_INVAL)
    if has_adopt:
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
        raise errors.OpPrereqError("Disk adoption is not supported for the"
                                   " '%s' disk template" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)
      if self.op.iallocator is not None:
        raise errors.OpPrereqError("Disk adoption not allowed with an"
                                   " iallocator script", errors.ECODE_INVAL)
      if self.op.mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for"
                                   " instance import", errors.ECODE_INVAL)
    else:
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
                                   " but no 'adopt' parameter given" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)

    self.adopt_disks = has_adopt

  def _CheckVLANArguments(self):
    """ Check validity of VLANs if given

    """
    for nic in self.op.nics:
      vlan = nic.get(constants.INIC_VLAN, None)
      if vlan:
        if vlan[0] == ".":
          # vlan starting with dot means single untagged vlan,
          # might be followed by trunk (:)
          if not vlan[1:].isdigit():
            vlanlist = vlan[1:].split(':')
            for vl in vlanlist:
              if not vl.isdigit():
                raise errors.OpPrereqError("Specified VLAN parameter is "
                                           "invalid : %s" % vlan,
                                           errors.ECODE_INVAL)
        elif vlan[0] == ":":
          # Trunk - tagged only
          vlanlist = vlan[1:].split(':')
          for vl in vlanlist:
            if not vl.isdigit():
              raise errors.OpPrereqError("Specified VLAN parameter is invalid"
                                         " : %s" % vlan, errors.ECODE_INVAL)
        elif vlan.isdigit():
          # This is the simplest case. No dots, only single digit
          # -> Create untagged access port, dot needs to be added
          nic[constants.INIC_VLAN] = "." + vlan
        else:
          raise errors.OpPrereqError("Specified VLAN parameter is invalid"
                                     " : %s" % vlan, errors.ECODE_INVAL)

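  # Accepted VLAN spellings, as parsed by _CheckVLANArguments above
  # (numbers hypothetical):
  #
  #   "1"       -> normalized to ".1" (untagged access port)
  #   ".1"      -> untagged VLAN 1
  #   ".1:2:3"  -> untagged VLAN 1 plus tagged VLANs 2 and 3
  #   ":2:3"    -> trunk carrying only tagged VLANs 2 and 3
  #
  # Anything else (e.g. "1:2" or ".x") raises OpPrereqError.
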
  def CheckArguments(self):
    """Check arguments.

    """
    # do not require name_check to ease forward/backward compatibility
    # for tools
    if self.op.no_install and self.op.start:
      self.LogInfo("No-installation mode selected, disabling startup")
      self.op.start = False
    # validate/normalize the instance name
    self.op.instance_name = \
      netutils.Hostname.GetNormalizedName(self.op.instance_name)

    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("Cannot do IP address check without a name"
                                 " check", errors.ECODE_INVAL)

    # instance name verification
    if self.op.name_check:
      self.hostname = _CheckHostnameSane(self, self.op.instance_name)
      self.op.instance_name = self.hostname.name
      # used in CheckPrereq for ip ping check
      self.check_ip = self.hostname.ip
    else:
      self.check_ip = None

    # add NIC for instance communication
    if self.op.instance_communication:
      nic_name = _ComputeInstanceCommunicationNIC(self.op.instance_name)

      for nic in self.op.nics:
        if nic.get(constants.INIC_NAME, None) == nic_name:
          break
      else:
        self.op.nics.append({constants.INIC_NAME: nic_name,
                             constants.INIC_MAC: constants.VALUE_GENERATE,
                             constants.INIC_IP: constants.NIC_IP_POOL,
                             constants.INIC_NETWORK:
                               self.cfg.GetInstanceCommunicationNetwork()})

    # timeouts for unsafe OS installs
    if self.op.helper_startup_timeout is None:
      self.op.helper_startup_timeout = constants.HELPER_VM_STARTUP

    if self.op.helper_shutdown_timeout is None:
      self.op.helper_shutdown_timeout = constants.HELPER_VM_SHUTDOWN

    # check nics' parameter names
    for nic in self.op.nics:
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
    # check that NIC's parameters names are unique and valid
    utils.ValidateDeviceNames("NIC", self.op.nics)

    self._CheckVLANArguments()

    self._CheckDiskArguments()
    assert self.op.disk_template is not None

    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver, errors.ECODE_INVAL)

    # set default file_driver if unset and required
    if (not self.op.file_driver and
        self.op.disk_template in constants.DTS_FILEBASED):
      self.op.file_driver = constants.FD_DEFAULT

    ### Node/iallocator related checks
    CheckIAllocatorOrNode(self, "iallocator", "pnode")

    if self.op.pnode is not None:
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        if self.op.snode is None:
          raise errors.OpPrereqError("The networked disk templates need"
                                     " a mirror node", errors.ECODE_INVAL)
      elif self.op.snode:
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
                        " template")
        self.op.snode = None

    _CheckOpportunisticLocking(self.op)

    if self.op.mode == constants.INSTANCE_IMPORT:
      # On import force_variant must be True, because if we forced it at
      # initial install, our only chance when importing it back is that it
      # works again!
      self.op.force_variant = True

      if self.op.no_install:
        self.LogInfo("No-installation mode has no effect during import")

      if objects.GetOSImage(self.op.osparams):
        self.LogInfo("OS image has no effect during import")
    elif self.op.mode == constants.INSTANCE_CREATE:
      os_image = CheckOSImage(self.op)

      if self.op.os_type is None and os_image is None:
        raise errors.OpPrereqError("No guest OS or OS image specified",
                                   errors.ECODE_INVAL)

      if self.op.os_type is not None \
            and self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
                                   " installation" % self.op.os_type,
                                   errors.ECODE_STATE)
    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
      if objects.GetOSImage(self.op.osparams):
        self.LogInfo("OS image has no effect during import")

      self._cds = GetClusterDomainSecret()

      # Check handshake to ensure both clusters have the same domain secret
      src_handshake = self.op.source_handshake
      if not src_handshake:
        raise errors.OpPrereqError("Missing source handshake",
                                   errors.ECODE_INVAL)

      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
                                                           src_handshake)
      if errmsg:
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
                                   errors.ECODE_INVAL)

      # Load and check source CA
      self.source_x509_ca_pem = self.op.source_x509_ca
      if not self.source_x509_ca_pem:
        raise errors.OpPrereqError("Missing source X509 CA",
                                   errors.ECODE_INVAL)

      try:
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
                                                    self._cds)
      except OpenSSL.crypto.Error, err:
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
                                   (err, ), errors.ECODE_INVAL)

      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
      if errcode is not None:
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
                                   errors.ECODE_INVAL)

      self.source_x509_ca = cert

      src_instance_name = self.op.source_instance_name
      if not src_instance_name:
        raise errors.OpPrereqError("Missing source instance name",
                                   errors.ECODE_INVAL)

      self.source_instance_name = \
        netutils.GetHostname(name=src_instance_name).name

    else:
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
                                 self.op.mode, errors.ECODE_INVAL)

  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    """
    self.needed_locks = {}

    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    CheckInstanceExistence(self, self.op.instance_name)

    self.add_locks[locking.LEVEL_INSTANCE] = self.op.instance_name

    if self.op.iallocator:
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
      # specifying a group on instance creation and then selecting nodes from
      # that group
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
        if self.op.disk_template == constants.DT_DRBD8:
          self.opportunistic_locks_count[locking.LEVEL_NODE] = 2
          self.opportunistic_locks_count[locking.LEVEL_NODE_RES] = 2
    else:
      (self.op.pnode_uuid, self.op.pnode) = \
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
      nodelist = [self.op.pnode_uuid]
      if self.op.snode is not None:
        (self.op.snode_uuid, self.op.snode) = \
          ExpandNodeUuidAndName(self.cfg, self.op.snode_uuid, self.op.snode)
        nodelist.append(self.op.snode_uuid)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from a path"
                                     " requires a source node option",
                                     errors.ECODE_INVAL)
      else:
        (self.op.src_node_uuid, self.op.src_node) = (_, src_node) = \
          ExpandNodeUuidAndName(self.cfg, self.op.src_node_uuid, src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(self.op.src_node_uuid)
        if not os.path.isabs(src_path):
          self.op.src_path = \
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)

    self.needed_locks[locking.LEVEL_NODE_RES] = \
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])

    # Optimistically acquire shared group locks (we're reading the
    # configuration). We can't just call GetInstanceNodeGroups, because the
    # instance doesn't exist yet. Therefore we lock all node groups of all
    # nodes we have.
    if self.needed_locks[locking.LEVEL_NODE] == locking.ALL_SET:
      # In the case we lock all nodes for opportunistic allocation, we have no
      # choice than to lock all groups, because they're allocated before nodes.
      # This is sad, but true. At least we release all those we don't need in
      # CheckPrereq later.
      self.needed_locks[locking.LEVEL_NODEGROUP] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        list(self.cfg.GetNodeGroupsFromNodes(
          self.needed_locks[locking.LEVEL_NODE]))
    self.share_locks[locking.LEVEL_NODEGROUP] = 1

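  # Sketch of the lock structure ExpandNames builds for the iallocator
  # case (abbreviated; level constants live in the locking module):
  #
  #   self.needed_locks = {
  #     locking.LEVEL_NODE: locking.ALL_SET,
  #     locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
  #     locking.LEVEL_NODE_RES: locking.ALL_SET,   # copied from LEVEL_NODE
  #     locking.LEVEL_NODEGROUP: locking.ALL_SET,  # shared (share_locks = 1)
  #   }
  #
  # With an explicit pnode/snode, LEVEL_NODE instead holds just those node
  # UUIDs and LEVEL_NODEGROUP the groups derived from them.
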
  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    if self.op.opportunistic_locking:
      # Only consider nodes for which a lock is held
      node_name_whitelist = self.cfg.GetNodeNames(
        set(self.owned_locks(locking.LEVEL_NODE)) &
        set(self.owned_locks(locking.LEVEL_NODE_RES)))
    else:
      node_name_whitelist = None

    req = _CreateInstanceAllocRequest(self.op, self.disks,
                                      self.nics, self.be_full,
                                      node_name_whitelist)
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      # When opportunistic locks are used only a temporary failure is generated
      if self.op.opportunistic_locking:
        ecode = errors.ECODE_TEMP_NORES
        self.LogInfo("IAllocator '%s' failed on opportunistically acquired"
                     " nodes: %s", self.op.iallocator, ial.info)
      else:
        ecode = errors.ECODE_NORES

      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 ecode)

    (self.op.pnode_uuid, self.op.pnode) = \
      ExpandNodeUuidAndName(self.cfg, None, ial.result[0])
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 utils.CommaJoin(ial.result))

    assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"

    if req.RequiredNodes() == 2:
      (self.op.snode_uuid, self.op.snode) = \
        ExpandNodeUuidAndName(self.cfg, None, ial.result[1])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["SRC_NODE"] = self.op.src_node
      env["SRC_PATH"] = self.op.src_path
      env["SRC_IMAGES"] = self.src_images

    env.update(BuildInstanceHookEnv(
      name=self.op.instance_name,
      primary_node_name=self.op.pnode,
      secondary_node_names=self.cfg.GetNodeNames(self.secondaries),
      status=self.op.start,
      os_type=self.op.os_type,
      minmem=self.be_full[constants.BE_MINMEM],
      maxmem=self.be_full[constants.BE_MAXMEM],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=NICListToTuple(self, self.nics),
      disk_template=self.op.disk_template,
      # Note that self.disks here is not a list with objects.Disk
      # but with dicts as returned by ComputeDisks.
      disks=self.disks,
      bep=self.be_full,
      hvp=self.hv_full,
      hypervisor_name=self.op.hypervisor,
      tags=self.op.tags,
      ))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode(), self.op.pnode_uuid] + self.secondaries
    return nl, nl

  def _ReadExportInfo(self):
    """Reads the export information from disk.

    It will override the opcode source node and path with the actual
    information, if these two were not specified before.

    @return: the export information

    """
    assert self.op.mode == constants.INSTANCE_IMPORT

    if self.op.src_node_uuid is None:
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
      exp_list = self.rpc.call_export_list(locked_nodes)
      found = False
      for node_uuid in exp_list:
        if exp_list[node_uuid].fail_msg:
          continue
        if self.op.src_path in exp_list[node_uuid].payload:
          found = True
          self.op.src_node = self.cfg.GetNodeInfo(node_uuid).name
          self.op.src_node_uuid = node_uuid
          self.op.src_path = utils.PathJoin(pathutils.EXPORT_DIR,
                                            self.op.src_path)
          break
      if not found:
        raise errors.OpPrereqError("No export found for relative path %s" %
                                   self.op.src_path, errors.ECODE_INVAL)

    CheckNodeOnline(self, self.op.src_node_uuid)
    result = self.rpc.call_export_info(self.op.src_node_uuid, self.op.src_path)
    result.Raise("No export or invalid export found in dir %s" %
                 self.op.src_path)

    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
    if not export_info.has_section(constants.INISECT_EXP):
      raise errors.ProgrammerError("Corrupted export config",
                                   errors.ECODE_ENVIRON)

    ei_version = export_info.get(constants.INISECT_EXP, "version")
    if int(ei_version) != constants.EXPORT_VERSION:
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                 (ei_version, constants.EXPORT_VERSION),
                                 errors.ECODE_ENVIRON)
    return export_info

  def _ReadExportParams(self, einfo):
    """Use export parameters as defaults.

    In case the opcode doesn't specify (as in override) some instance
    parameters, then try to use them from the export information, if
    that declares them.

    """
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")

    if not self.op.disks:
      disks = []
      # TODO: import the disk iv_name too
      for idx in range(constants.MAX_DISKS):
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
          disk_name = einfo.get(constants.INISECT_INS, "disk%d_name" % idx)
          disk = {
            constants.IDISK_SIZE: disk_sz,
            constants.IDISK_NAME: disk_name
            }
          disks.append(disk)
      self.op.disks = disks
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
        raise errors.OpPrereqError("No disk info specified and the export"
                                   " is missing the disk information",
                                   errors.ECODE_INVAL)

    if not self.op.nics:
      nics = []
      for idx in range(constants.MAX_NICS):
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
          ndict = {}
          for name in [constants.INIC_IP,
                       constants.INIC_MAC, constants.INIC_NAME]:
            nic_param_name = "nic%d_%s" % (idx, name)
            if einfo.has_option(constants.INISECT_INS, nic_param_name):
              v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
              ndict[name] = v
          network = einfo.get(constants.INISECT_INS,
                              "nic%d_%s" % (idx, constants.INIC_NETWORK))
          # in case network is given link and mode are inherited
          # from nodegroup's netparams and thus should not be passed here
          if network:
            ndict[constants.INIC_NETWORK] = network
          else:
            for name in list(constants.NICS_PARAMETERS):
              v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
              ndict[name] = v
          nics.append(ndict)
        else:
          break
      self.op.nics = nics

    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()

    if (self.op.hypervisor is None and
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")

    if einfo.has_section(constants.INISECT_HYP):
      # use the export parameters but do not override the ones
      # specified by the user
      for name, value in einfo.items(constants.INISECT_HYP):
        if name not in self.op.hvparams:
          self.op.hvparams[name] = value

    if einfo.has_section(constants.INISECT_BEP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_BEP):
        if name not in self.op.beparams:
          self.op.beparams[name] = value
        # Compatibility for the old "memory" be param
        if name == constants.BE_MEMORY:
          if constants.BE_MAXMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MAXMEM] = value
          if constants.BE_MINMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MINMEM] = value
    else:
      # try to read the parameters old style, from the main section
      for name in constants.BES_PARAMETERS:
        if (name not in self.op.beparams and
            einfo.has_option(constants.INISECT_INS, name)):
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)

    if einfo.has_section(constants.INISECT_OSP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_OSP):
        if name not in self.op.osparams:
          self.op.osparams[name] = value

    if einfo.has_section(constants.INISECT_OSP_PRIVATE):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_OSP_PRIVATE):
        if name not in self.op.osparams_private:
          self.op.osparams_private[name] = serializer.Private(value, descr=name)

  def _RevertToDefaults(self, cluster):
    """Revert the instance parameters to the default values.

    """
    # hvparams
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
    for name in self.op.hvparams.keys():
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
        del self.op.hvparams[name]
    # beparams
    be_defs = cluster.SimpleFillBE({})
    for name in self.op.beparams.keys():
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
        del self.op.beparams[name]
    # nic params
    nic_defs = cluster.SimpleFillNIC({})
    for nic in self.op.nics:
      for name in constants.NICS_PARAMETERS:
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
          del nic[name]
    # osparams
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
    for name in self.op.osparams.keys():
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
        del self.op.osparams[name]

    os_defs_ = cluster.SimpleFillOS(self.op.os_type, {},
                                    os_params_private={})
    for name in self.op.osparams_private.keys():
      if name in os_defs_ and os_defs_[name] == self.op.osparams_private[name]:
        del self.op.osparams_private[name]

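  # Illustrative effect of _RevertToDefaults (values hypothetical): if the
  # cluster default for the "kernel_path" hvparam is "/boot/vmlinuz" and
  # op.hvparams carries the same value, the entry is dropped so the
  # instance keeps tracking the cluster default instead of pinning it;
  # differing values are kept verbatim.
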
  def CheckPrereq(self): # pylint: disable=R0914
    """Check prerequisites.

    """
    # Check that the optimistically acquired groups are correct wrt the
    # acquired nodes
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
    cur_groups = list(self.cfg.GetNodeGroupsFromNodes(owned_nodes))
    if not owned_groups.issuperset(cur_groups):
      raise errors.OpPrereqError("New instance %s's node groups changed since"
                                 " locks were acquired, current groups"
                                 " are '%s', owning groups '%s'; retry the"
                                 " operation" %
                                 (self.op.instance_name,
                                  utils.CommaJoin(cur_groups),
                                  utils.CommaJoin(owned_groups)),
                                 errors.ECODE_STATE)

    self.instance_file_storage_dir = CalculateFileStorageDir(self)

    if self.op.mode == constants.INSTANCE_IMPORT:
      export_info = self._ReadExportInfo()
      self._ReadExportParams(export_info)
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
    else:
      self._old_instance_name = None

    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances", errors.ECODE_STATE)

    if (self.op.hypervisor is None or
        self.op.hypervisor == constants.VALUE_AUTO):
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" %
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
                                 errors.ECODE_STATE)

    # Check tag validity
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)

    # check hypervisor parameter syntax (locally)
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
                                      self.op.hvparams)
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)
    self.hv_full = filled_hvp
    # check that we don't specify global parameters on an instance
    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
                         "instance", "cluster")

    # fill and remember the beparams dict
    self.be_full = _ComputeFullBeParams(self.op, cluster)

    # build os parameters
    if self.op.osparams_private is None:
      self.op.osparams_private = serializer.PrivateDict()
    if self.op.osparams_secret is None:
      self.op.osparams_secret = serializer.PrivateDict()

    self.os_full = cluster.SimpleFillOS(
      self.op.os_type,
      self.op.osparams,
      os_params_private=self.op.osparams_private,
      os_params_secret=self.op.osparams_secret
    )

    # now that hvp/bep are in final format, let's reset to defaults,
    # if told to do so
    if self.op.identify_defaults:
      self._RevertToDefaults(cluster)

    # NIC buildup
    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
                             self.proc.GetECId())

    # disk checks/pre-build
    default_vg = self.cfg.GetVGName()
    self.disks = ComputeDisks(self.op.disks, self.op.disk_template, default_vg)

    if self.op.mode == constants.INSTANCE_IMPORT:
      disk_images = []
      for idx in range(len(self.disks)):
        option = "disk%d_dump" % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = utils.PathJoin(self.op.src_path, export_name)
          disk_images.append(image)
        else:
          disk_images.append(False)

      self.src_images = disk_images

      if self.op.instance_name == self._old_instance_name:
        for idx, nic in enumerate(self.nics):
          if nic.mac == constants.VALUE_AUTO:
            nic_mac_ini = "nic%d_mac" % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT

    # ip ping checks (we use the same ip that was resolved in ExpandNames)
    if self.op.ip_check:
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name),
                                   errors.ECODE_NOTUNIQUE)

    #### mac address generation
    # By generating here the mac address both the allocator and the hooks get
    # the real final mac address rather than the 'auto' or 'generate' value.
    # There is a race condition between the generation and the instance object
    # creation, which means that we know the mac is valid now, but we're not
    # sure it will be when we actually add the instance. If things go bad
    # adding the instance will abort because of a duplicate mac, and the
    # creation job will fail.
    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())

    #### allocator run

    if self.op.iallocator is not None:
      self._RunAllocator()

    # Release all unneeded node locks
    keep_locks = filter(None, [self.op.pnode_uuid, self.op.snode_uuid,
                               self.op.src_node_uuid])
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
    # Release all unneeded group locks
    ReleaseLocks(self, locking.LEVEL_NODEGROUP,
                 keep=self.cfg.GetNodeGroupsFromNodes(keep_locks))

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES)), \
      "Node locks differ from node resource locks"

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode_uuid)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode_uuid
    if pnode.offline:
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if pnode.drained:
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if not pnode.vm_capable:
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
                                 " '%s'" % pnode.name, errors.ECODE_STATE)

    self.secondaries = []

    # Fill in any IPs from IP pools. This must happen here, because we need to
    # know the nic's primary node, as specified by the iallocator
    for idx, nic in enumerate(self.nics):
      net_uuid = nic.network
      if net_uuid is not None:
        nobj = self.cfg.GetNetwork(net_uuid)
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.uuid)
        if netparams is None:
          raise errors.OpPrereqError("No netparams found for network"
                                     " %s. Probably not connected to"
                                     " node's %s nodegroup" %
                                     (nobj.name, self.pnode.name),
                                     errors.ECODE_INVAL)
        self.LogInfo("NIC/%d inherits netparams %s" %
                     (idx, netparams.values()))
        nic.nicparams = dict(netparams)
        if nic.ip is not None:
          if nic.ip.lower() == constants.NIC_IP_POOL:
            try:
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
                                         " from the address pool" % idx,
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
          else:
            try:
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId(),
                                 check=self.op.conflicts_check)
            except errors.ReservationError:
              raise errors.OpPrereqError("IP address %s already in use"
                                         " or does not belong to network %s" %
                                         (nic.ip, nobj.name),
                                         errors.ECODE_NOTUNIQUE)

      # net is None, ip None or given
      elif self.op.conflicts_check:
        _CheckForConflictingIp(self, nic.ip, self.pnode.uuid)

    # mirror node verification
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.snode_uuid == pnode.uuid:
        raise errors.OpPrereqError("The secondary node cannot be the"
                                   " primary node", errors.ECODE_INVAL)
      CheckNodeOnline(self, self.op.snode_uuid)
      CheckNodeNotDrained(self, self.op.snode_uuid)
      CheckNodeVmCapable(self, self.op.snode_uuid)
      self.secondaries.append(self.op.snode_uuid)

      snode = self.cfg.GetNodeInfo(self.op.snode_uuid)
      if pnode.group != snode.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    nodes = [pnode]
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      nodes.append(snode)
    has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
    excl_stor = compat.any(map(has_es, nodes))
    if excl_stor and not self.op.disk_template in constants.DTS_EXCL_STORAGE:
      raise errors.OpPrereqError("Disk template %s not supported with"
                                 " exclusive storage" % self.op.disk_template,
                                 errors.ECODE_STATE)
    for disk in self.disks:
      CheckSpindlesExclusiveStorage(disk, excl_stor, True)

    node_uuids = [pnode.uuid] + self.secondaries

    if not self.adopt_disks:
      if self.op.disk_template == constants.DT_RBD:
        # _CheckRADOSFreeSpace() is just a placeholder.
        # Any function that checks prerequisites can be placed here.
        # Check if there is enough space on the RADOS cluster.
        CheckRADOSFreeSpace()
      elif self.op.disk_template == constants.DT_EXT:
        # FIXME: Function that checks prereqs if needed
        pass
      elif self.op.disk_template in constants.DTS_LVM:
        # Check lv size requirements, if not adopting
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
        CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes)
      else:
        # FIXME: add checks for other, non-adopting, non-lvm disk templates
        pass

    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
                                disk[constants.IDISK_ADOPT])
                     for disk in self.disks])
      if len(all_lvs) != len(self.disks):
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
                                   errors.ECODE_INVAL)
      for lv_name in all_lvs:
        try:
          # FIXME: lv_name here is "vg/lv" need to ensure that other calls
          # to ReserveLV uses the same syntax
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("LV named %s used by another instance" %
                                     lv_name, errors.ECODE_NOTUNIQUE)

      vg_names = self.rpc.call_vg_list([pnode.uuid])[pnode.uuid]
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name,
                     prereq=True)

      node_lvs = self.rpc.call_lv_list([pnode.uuid],
                                       vg_names.payload.keys())[pnode.uuid]
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name,
                     prereq=True)
      node_lvs = node_lvs.payload

      delta = all_lvs.difference(node_lvs.keys())
      if delta:
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
      if online_lvs:
        raise errors.OpPrereqError("Online logical volumes found, cannot"
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
                                   errors.ECODE_STATE)
      # update the size of disk based on what is found
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
                                        dsk[constants.IDISK_ADOPT])][0]))

    elif self.op.disk_template == constants.DT_BLOCK:
      # Normalize and de-duplicate device paths
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
                       for disk in self.disks])
      if len(all_disks) != len(self.disks):
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
                                   errors.ECODE_INVAL)
      baddisks = [d for d in all_disks
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
      if baddisks:
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
                                   " cannot be adopted" %
                                   (utils.CommaJoin(baddisks),
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
                                   errors.ECODE_INVAL)

      node_disks = self.rpc.call_bdev_sizes([pnode.uuid],
                                            list(all_disks))[pnode.uuid]
      node_disks.Raise("Cannot get block device information from node %s" %
                       pnode.name, prereq=True)
      node_disks = node_disks.payload
      delta = all_disks.difference(node_disks.keys())
      if delta:
        raise errors.OpPrereqError("Missing block device(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))

    # Check disk access param to be compatible with specified hypervisor
    node_info = self.cfg.GetNodeInfo(self.op.pnode_uuid)
    node_group = self.cfg.GetNodeGroup(node_info.group)
    group_disk_params = self.cfg.GetGroupDiskParams(node_group)
    group_access_type = group_disk_params[self.op.disk_template].get(
      constants.RBD_ACCESS, constants.DISK_KERNELSPACE
    )
    for dsk in self.disks:
      access_type = dsk.get(constants.IDISK_ACCESS, group_access_type)
      if not IsValidDiskAccessModeCombination(self.op.hypervisor,
                                              self.op.disk_template,
                                              access_type):
        raise errors.OpPrereqError("Selected hypervisor (%s) cannot be"
                                   " used with %s disk access param" %
                                   (self.op.hypervisor, access_type),
                                   errors.ECODE_STATE)

    # Verify instance specs
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
    ispec = {
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
      constants.ISPEC_DISK_COUNT: len(self.disks),
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
                                  for disk in self.disks],
      constants.ISPEC_NIC_COUNT: len(self.nics),
      constants.ISPEC_SPINDLE_USE: spindle_use,
      }

    group_info = self.cfg.GetNodeGroup(pnode.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
                                               self.op.disk_template)
    if not self.op.ignore_ipolicy and res:
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
             (pnode.group, group_info.name, utils.CommaJoin(res)))
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

    CheckHVParams(self, node_uuids, self.op.hypervisor, self.op.hvparams)

    CheckOSParams(self, True, node_uuids, self.op.os_type, self.os_full,
                  self.op.force_variant)

    CheckNicsBridgesExist(self, self.nics, self.pnode.uuid)

    CheckCompressionTool(self, self.op.compress)

    #TODO: _CheckExtParams (remotely)
    # Check parameters for extstorage

    # memory check on primary node
    #TODO(dynmem): use MINMEM for checking
    if self.op.start:
      hvfull = objects.FillDict(cluster.hvparams.get(self.op.hypervisor, {}),
                                self.op.hvparams)
      CheckNodeFreeMemory(self, self.pnode.uuid,
                          "creating instance %s" % self.op.instance_name,
                          self.be_full[constants.BE_MAXMEM],
                          self.op.hypervisor, hvfull)

    self.dry_run_result = list(node_uuids)

  def _RemoveDegradedDisks(self, feedback_fn, disk_abort, instance):
    """Removes degraded disks and instance.

    It optionally checks whether disks are degraded. If the disks are
    degraded, they are removed and the instance is also removed from
    the configuration.

    If L{disk_abort} is True, then the disks are considered degraded
    and removed, and the instance is removed from the configuration.

    If L{disk_abort} is False, then it first checks whether disks are
    degraded and, if so, it removes the disks and the instance is
    removed from the configuration.

    @type feedback_fn: callable
    @param feedback_fn: function used to send feedback back to the caller

    @type disk_abort: boolean
    @param disk_abort:
      True if disks are degraded, False to first check if disks are
      degraded
    @type instance: L{objects.Instance}
    @param instance: instance containing the disks to check

    @rtype: NoneType
    @return: None
    @raise errors.OpExecError: if disks are degraded

    """
    if disk_abort:
      pass
    elif self.op.wait_for_sync:
      disk_abort = not WaitForSync(self, instance)
    elif instance.disk_template in constants.DTS_INT_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      feedback_fn("* checking mirrors status")
      disk_abort = not WaitForSync(self, instance, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      RemoveDisks(self, instance)
      for disk_uuid in instance.disks:
        self.cfg.RemoveInstanceDisk(instance.uuid, disk_uuid)
      self.cfg.RemoveInstance(instance.uuid)
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

  def RunOsScripts(self, feedback_fn, iobj):
    """Run OS scripts

    If necessary, disks are paused. It handles instance create,
    import, and remote import.

    @type feedback_fn: callable
    @param feedback_fn: function used to send feedback back to the caller

    @type iobj: L{objects.Instance}
    @param iobj: instance object

    """
    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
      disks = self.cfg.GetInstanceDisks(iobj.uuid)
      if self.op.mode == constants.INSTANCE_CREATE:
        os_image = objects.GetOSImage(self.op.osparams)

        if os_image is None and not self.op.no_install:
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
                        not self.op.wait_for_sync)
          if pause_sync:
            feedback_fn("* pausing disk sync to install instance OS")
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
                                                              (disks, iobj),
                                                              True)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("pause-sync of instance %s for disk %d failed",
                             self.op.instance_name, idx)

          feedback_fn("* running the instance OS create scripts...")
          # FIXME: pass debug option from opcode to backend
          os_add_result = \
            self.rpc.call_instance_os_add(self.pnode.uuid,
                                          (iobj, self.op.osparams_secret),
                                          False,
                                          self.op.debug_level)
          if pause_sync:
            feedback_fn("* resuming disk sync")
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
                                                              (disks, iobj),
                                                              False)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("resume-sync of instance %s for disk %d failed",
                             self.op.instance_name, idx)

          os_add_result.Raise("Could not add os for instance %s"
                              " on node %s" % (self.op.instance_name,
                                               self.pnode.name))

      else:
        if self.op.mode == constants.INSTANCE_IMPORT:
          feedback_fn("* running the instance OS import scripts...")

          transfers = []

          for idx, image in enumerate(self.src_images):
            if not image:
              continue

            if iobj.os:
              dst_io = constants.IEIO_SCRIPT
              dst_ioargs = ((disks[idx], iobj), idx)
            else:
              dst_io = constants.IEIO_RAW_DISK
              dst_ioargs = (disks[idx], iobj)

            # FIXME: pass debug option from opcode to backend
            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
                                               constants.IEIO_FILE, (image, ),
                                               dst_io, dst_ioargs,
                                               None)
            transfers.append(dt)

          import_result = \
            masterd.instance.TransferInstanceData(self, feedback_fn,
                                                  self.op.src_node_uuid,
                                                  self.pnode.uuid,
                                                  self.pnode.secondary_ip,
                                                  self.op.compress,
                                                  iobj, transfers)
          if not compat.all(import_result):
            self.LogWarning("Some disks for instance %s on node %s were not"
                            " imported successfully" % (self.op.instance_name,
                                                        self.pnode.name))

          rename_from = self._old_instance_name

        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
          feedback_fn("* preparing remote import...")
          # The source cluster will stop the instance before attempting to make
          # a connection. In some cases stopping an instance can take a long
          # time, hence the shutdown timeout is added to the connection
          # timeout.
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
                             self.op.source_shutdown_timeout)
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)

          assert iobj.primary_node == self.pnode.uuid
          disk_results = \
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
                                          self.source_x509_ca,
                                          self._cds, self.op.compress, timeouts)
          if not compat.all(disk_results):
            # TODO: Should the instance still be started, even if some disks
            # failed to import (valid for local imports, too)?
            self.LogWarning("Some disks for instance %s on node %s were not"
                            " imported successfully" % (self.op.instance_name,
                                                        self.pnode.name))

          rename_from = self.source_instance_name

        else:
          # also checked in the prereq part
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                       % self.op.mode)

        assert iobj.name == self.op.instance_name

        # Run rename script on newly imported instance
        if iobj.os:
          feedback_fn("Running rename script for %s" % self.op.instance_name)
          result = self.rpc.call_instance_run_rename(self.pnode.uuid, iobj,
                                                     rename_from,
                                                     self.op.debug_level)
          result.Warn("Failed to run rename script for %s on node %s" %
                      (self.op.instance_name, self.pnode.name), self.LogWarning)

1496 def GetOsInstallPackageEnvironment(self, instance, script):
1497 """Returns the OS scripts environment for the helper VM
1498
1499 @type instance: L{objects.Instance}
1500 @param instance: instance for which the OS scripts are run
1501
1502 @type script: string
1503 @param script: script to run (e.g.,
1504 constants.OS_SCRIPT_CREATE_UNTRUSTED)
1505
1506 @rtype: dict of string to string
1507 @return: OS scripts environment for the helper VM
1508
1509 """
1510 env = {"OS_SCRIPT": script}
1511
1512 # We pass only the instance's disks, not the helper VM's disks.
1513 if instance.hypervisor == constants.HT_KVM:
1514 prefix = "/dev/vd"
1515 elif instance.hypervisor in [constants.HT_XEN_PVM, constants.HT_XEN_HVM]:
1516 prefix = "/dev/xvd"
1517 else:
1518 raise errors.OpExecError("Cannot run OS scripts in a virtualized"
1519 " environment for hypervisor '%s'"
1520 % instance.hypervisor)
1521
1522 num_disks = len(self.cfg.GetInstanceDisks(instance.uuid))
1523
1524 for idx, disk_label in enumerate(utils.GetDiskLabels(prefix, num_disks + 1,
1525 start=1)):
1526 env["DISK_%d_PATH" % idx] = disk_label
1527
1528 return env
1529
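# Illustrative sketch of the result (values are assumptions, not taken
# from the original code): for a KVM instance with two disks, the method
# would return something like
#
#   {"OS_SCRIPT": constants.OS_SCRIPT_CREATE_UNTRUSTED,
#    "DISK_0_PATH": "/dev/vdb",
#    "DISK_1_PATH": "/dev/vdc"}
#
# The first label ("/dev/vda") is skipped via start=1, presumably because
# the first device belongs to the helper VM itself rather than to the
# instance's disks.
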
1530 def UpdateInstanceOsInstallPackage(self, feedback_fn, instance, override_env):
1531 """Updates the OS parameter 'os-install-package' for an instance.
1532
1533 The OS install package is an archive containing an OS definition
1534 and a file containing the environment variables needed to run the
1535 OS scripts.
1536
1537 The OS install package is served by the metadata daemon to the
1538 instances, so the OS scripts can run inside the virtualized
1539 environment.
1540
1541 @type feedback_fn: callable
1542 @param feedback_fn: function used to send feedback back to the caller
1543
1544 @type instance: L{objects.Instance}
1545 @param instance: instance for which the OS parameter
1546 'os-install-package' is updated
1547
1548 @type override_env: dict of string to string
1549 @param override_env: if supplied, it overrides the environment of
1550 the export OS scripts archive
1551
1552 """
1553 if "os-install-package" in instance.osparams:
1554 feedback_fn("Using OS install package '%s'" %
1555 instance.osparams["os-install-package"])
1556 else:
1557 result = self.rpc.call_os_export(instance.primary_node, instance,
1558 override_env)
1559 result.Raise("Could not export OS '%s'" % instance.os)
1560 instance.osparams["os-install-package"] = result.payload
1561
1562 feedback_fn("Created OS install package '%s'" % result.payload)
1563
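# Hypothetical usage sketch (the parameter value is illustrative):
# pre-seeding 'os-install-package' skips the export RPC entirely:
#
#   instance.osparams["os-install-package"] = "file:///tmp/os-pkg.tar"
#   self.UpdateInstanceOsInstallPackage(feedback_fn, instance, {})
#   # -> feedback: "Using OS install package 'file:///tmp/os-pkg.tar'"
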
1564 def RunOsScriptsVirtualized(self, feedback_fn, instance):
1565 """Runs the OS scripts inside a safe virtualized environment.
1566
1567 The virtualized environment reuses the instance and temporarily
1568 creates a disk onto which the image of the helper VM is dumped.
1569 The temporary disk is used to boot the helper VM. The OS scripts
1570 are passed to the helper VM through the metadata daemon and the OS
1571 install package.
1572
1573 @type feedback_fn: callable
1574 @param feedback_fn: function used to send feedback back to the caller
1575
1576 @type instance: L{objects.Instance}
1577 @param instance: instance for which the OS scripts must be run
1578 inside the virtualized environment
1579
1580 """
1581 install_image = self.cfg.GetInstallImage()
1582
1583 if not install_image:
1584 raise errors.OpExecError("Cannot create install instance because an"
1585 " install image has not been specified")
1586
1587 disk_size = DetermineImageSize(self, install_image, instance.primary_node)
1588
1589 env = self.GetOsInstallPackageEnvironment(
1590 instance,
1591 constants.OS_SCRIPT_CREATE_UNTRUSTED)
1592 self.UpdateInstanceOsInstallPackage(feedback_fn, instance, env)
1593 UpdateMetadata(feedback_fn, self.rpc, instance,
1594 osparams_private=self.op.osparams_private,
1595 osparams_secret=self.op.osparams_secret)
1596
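# TemporaryDisk is a context manager: the extra plain disk added below
# exists only for the duration of the 'with' block and is cleaned up
# again when the block exits.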
1597 with TemporaryDisk(self,
1598 instance,
1599 [(constants.DT_PLAIN, constants.DISK_RDWR, disk_size)],
1600 feedback_fn):
1601 feedback_fn("Activating instance disks")
1602 StartInstanceDisks(self, instance, False)
1603
1604 feedback_fn("Imaging disk with install image")
1605 ImageDisks(self, instance, install_image)
1606
1607 feedback_fn("Starting instance with install image")
1608 result = self.rpc.call_instance_start(instance.primary_node,
1609 (instance, [], []),
1610 False, self.op.reason)
1611 result.Raise("Could not start instance '%s' with the install image '%s'"
1612 % (instance.name, install_image))
1613
1614 # First wait for the instance to start up
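# (As used here, retry.SimpleRetry(expected, fn, delay, timeout)
# re-evaluates fn() every `delay` seconds until it returns `expected` or
# `timeout` expires, and returns the last value seen; hence the truth
# checks on the result below.)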
1615 running_check = lambda: IsInstanceRunning(self, instance, prereq=False)
1616 instance_up = retry.SimpleRetry(True, running_check, 5.0,
1617 self.op.helper_startup_timeout)
1618 if not instance_up:
1619 raise errors.OpExecError("Could not boot instance using install image"
1620 " '%s'" % install_image)
1621
1622 feedback_fn("Instance is up, now awaiting shutdown")
1623
1624 # Then wait for it to finish, as detected by its shutdown
1625 instance_up = retry.SimpleRetry(False, running_check, 20.0,
1626 self.op.helper_shutdown_timeout)
1627 if instance_up:
1628 self.LogWarning("Installation not completed prior to timeout, shutting"
1629 " down instance forcibly")
1630
1631 feedback_fn("Installation complete")
1632
1633 def Exec(self, feedback_fn):
1634 """Create and add the instance to the cluster.
1635
1636 """
1637 assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
1638 self.owned_locks(locking.LEVEL_NODE)), \
1639 "Node locks differ from node resource locks"
1640
1641 ht_kind = self.op.hypervisor
1642 if ht_kind in constants.HTS_REQ_PORT:
1643 network_port = self.cfg.AllocatePort()
1644 else:
1645 network_port = None
1646
1647 instance_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
1648
1649 # This is ugly, but we have a chicken-and-egg problem here:
1650 # We can only take the group disk parameters, as the instance
1651 # has no disks yet (we are generating them right here).
1652 nodegroup = self.cfg.GetNodeGroup(self.pnode.group)
1653 disks = GenerateDiskTemplate(self,
1654 self.op.disk_template,
1655 instance_uuid, self.pnode.uuid,
1656 self.secondaries,
1657 self.disks,
1658 self.instance_file_storage_dir,
1659 self.op.file_driver,
1660 0,
1661 feedback_fn,
1662 self.cfg.GetGroupDiskParams(nodegroup))
1663
1664 if self.op.os_type is None:
1665 os_type = ""
1666 else:
1667 os_type = self.op.os_type
1668
1669 iobj = objects.Instance(name=self.op.instance_name,
1670 uuid=instance_uuid,
1671 os=os_type,
1672 primary_node=self.pnode.uuid,
1673 nics=self.nics, disks=[],
1674 disk_template=self.op.disk_template,
1675 disks_active=False,
1676 admin_state=constants.ADMINST_DOWN,
1677 admin_state_source=constants.ADMIN_SOURCE,
1678 network_port=network_port,
1679 beparams=self.op.beparams,
1680 hvparams=self.op.hvparams,
1681 hypervisor=self.op.hypervisor,
1682 osparams=self.op.osparams,
1683 osparams_private=self.op.osparams_private,
1684 )
1685
1686 if self.op.tags:
1687 for tag in self.op.tags:
1688 iobj.AddTag(tag)
1689
1690 if self.adopt_disks:
1691 if self.op.disk_template == constants.DT_PLAIN:
1692 # rename LVs to the newly-generated names; we need to construct
1693 # 'fake' LV disks with the old data, plus the new unique_id
1694 tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
1695 rename_to = []
1696 for t_dsk, a_dsk in zip(tmp_disks, self.disks):
1697 rename_to.append(t_dsk.logical_id)
1698 t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
1699 result = self.rpc.call_blockdev_rename(self.pnode.uuid,
1700 zip(tmp_disks, rename_to))
1701 result.Raise("Failed to rename adopted LVs")
1702 else:
1703 feedback_fn("* creating instance disks...")
1704 try:
1705 CreateDisks(self, iobj, disks=disks)
1706 except errors.OpExecError:
1707 self.LogWarning("Device creation failed")
1708 self.cfg.ReleaseDRBDMinors(instance_uuid)
1709 raise
1710
1711 feedback_fn("adding instance %s to cluster config" % self.op.instance_name)
1712 self.cfg.AddInstance(iobj, self.proc.GetECId())
1713
1714 feedback_fn("adding disks to cluster config")
1715 for disk in disks:
1716 self.cfg.AddInstanceDisk(iobj.uuid, disk)
1717
1718 # re-read the instance from the configuration
1719 iobj = self.cfg.GetInstanceInfo(iobj.uuid)
1720
1721 if self.op.mode == constants.INSTANCE_IMPORT:
1722 # Release unused nodes
1723 ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node_uuid])
1724 else:
1725 # Release all nodes
1726 ReleaseLocks(self, locking.LEVEL_NODE)
1727
1728 # Wipe disks
1729 disk_abort = False
1730 if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
1731 feedback_fn("* wiping instance disks...")
1732 try:
1733 WipeDisks(self, iobj)
1734 except errors.OpExecError, err:
1735 logging.exception("Wiping disks failed")
1736 self.LogWarning("Wiping instance disks failed (%s)", err)
1737 disk_abort = True
1738
1739 self._RemoveDegradedDisks(feedback_fn, disk_abort, iobj)
1740
1741 # Image disks
1742 os_image = objects.GetOSImage(iobj.osparams)
1743 disk_abort = False
1744
1745 if not self.adopt_disks and os_image is not None:
1746 feedback_fn("* imaging instance disks...")
1747 try:
1748 ImageDisks(self, iobj, os_image)
1749 except errors.OpExecError, err:
1750 logging.exception("Imaging disks failed")
1751 self.LogWarning("Imaging instance disks failed (%s)", err)
1752 disk_abort = True
1753
1754 self._RemoveDegradedDisks(feedback_fn, disk_abort, iobj)
1755
1756 # instance disks are now active
1757 iobj.disks_active = True
1758
1759 # Release all node resource locks
1760 ReleaseLocks(self, locking.LEVEL_NODE_RES)
1761
1762 if iobj.os:
1763 result = self.rpc.call_os_diagnose([iobj.primary_node])[iobj.primary_node]
1764 result.Raise("Failed to get OS '%s'" % iobj.os)
1765
1766 trusted = None
1767
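# Each entry of the diagnose payload is a per-OS tuple; only the name
# (first field) and the 'trusted' flag (last field) matter here, the
# remaining fields are discarded.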
1768 for (name, _, _, _, _, _, _, os_trusted) in result.payload:
1769 if name == objects.OS.GetName(iobj.os):
1770 trusted = os_trusted
1771 break
1772
1773 if trusted is None:
1774 raise errors.OpPrereqError("OS '%s' is not available on node '%s'" %
1775 (iobj.os, iobj.primary_node), errors.ECODE_NOENT)
1776 elif trusted:
1777 self.RunOsScripts(feedback_fn, iobj)
1778 else:
1779 self.RunOsScriptsVirtualized(feedback_fn, iobj)
1780 # Instance is modified by 'RunOsScriptsVirtualized',
1781 # therefore, it must be retrieved once again from the
1782 # configuration, otherwise there will be a config object
1783 # version mismatch.
1784 iobj = self.cfg.GetInstanceInfo(iobj.uuid)
1785
1786 # Update instance metadata so that it can be reached from the
1787 # metadata service.
1788 UpdateMetadata(feedback_fn, self.rpc, iobj,
1789 osparams_private=self.op.osparams_private,
1790 osparams_secret=self.op.osparams_secret)
1791
1792 assert not self.owned_locks(locking.LEVEL_NODE_RES)
1793
1794 if self.op.start:
1795 iobj.admin_state = constants.ADMINST_UP
1796 self.cfg.Update(iobj, feedback_fn)
1797 logging.info("Starting instance %s on node %s", self.op.instance_name,
1798 self.pnode.name)
1799 feedback_fn("* starting instance...")
1800 result = self.rpc.call_instance_start(self.pnode.uuid, (iobj, None, None),
1801 False, self.op.reason)
1802 result.Raise("Could not start instance")
1803
1804 return self.cfg.GetNodeNames(list(self.cfg.GetInstanceNodes(iobj.uuid)))
1805
1806 def PrepareRetry(self, feedback_fn):
1807 # A temporary lack of resources can only happen if opportunistic locking
1808 # is used.
1809 assert self.op.opportunistic_locking
1810
1811 logging.info("Opportunistic locking did not succeed, falling back to"
1812 " full lock allocation")
1813 feedback_fn("* falling back to full lock allocation")
1814 self.op.opportunistic_locking = False
1815
1816
1817 class LUInstanceRename(LogicalUnit):
1818 """Rename an instance.
1819
1820 """
1821 HPATH = "instance-rename"
1822 HTYPE = constants.HTYPE_INSTANCE
1823 REQ_BGL = False
1824
1825 def CheckArguments(self):
1826 """Check arguments.
1827
1828 """
1829 if self.op.ip_check and not self.op.name_check:
1830 # TODO: make the ip check more flexible and not depend on the name check
1831 raise errors.OpPrereqError("IP address check requires a name check",
1832 errors.ECODE_INVAL)
1833
1834 self._new_name_resolved = False
1835
1836 def BuildHooksEnv(self):
1837 """Build hooks env.
1838
1839 This runs on master, primary and secondary nodes of the instance.
1840
1841 """
1842 env = BuildInstanceHookEnvByObject(self, self.instance)
1843 env["INSTANCE_NEW_NAME"] = self.op.new_name
1844 return env
1845
1846 def BuildHooksNodes(self):
1847 """Build hooks nodes.
1848
1849 """
1850 nl = [self.cfg.GetMasterNode()] + \
1851 list(self.cfg.GetInstanceNodes(self.instance.uuid))
1852 return (nl, nl)
1853
1854 def _PerformChecksAndResolveNewName(self):
1855 """Checks and resolves the new name, storing the FQDN, if permitted.
1856
1857 """
1858 if self._new_name_resolved or not self.op.name_check:
1859 return
1860
1861 hostname = _CheckHostnameSane(self, self.op.new_name)
1862 self.op.new_name = hostname.name
1863 if (self.op.ip_check and
1864 netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
1865 raise errors.OpPrereqError("IP %s of instance %s already in use" %
1866 (hostname.ip, self.op.new_name),
1867 errors.ECODE_NOTUNIQUE)
1868 self._new_name_resolved = True
1869
1870 def CheckPrereq(self):
1871 """Check prerequisites.
1872
1873 This checks that the instance is in the cluster and is not running.
1874
1875 """
1876 (self.op.instance_uuid, self.op.instance_name) = \
1877 ExpandInstanceUuidAndName(self.cfg, self.op.instance_uuid,
1878 self.op.instance_name)
1879 instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1880 assert instance is not None
1881
1882 # It should actually not happen that an instance is running with a disabled
1883 # disk template, but in case it does, the renaming of file-based instances
1884 # will fail horribly. Thus, we check this beforehand.
1885 if (instance.disk_template in constants.DTS_FILEBASED and
1886 self.op.new_name != instance.name):
1887 CheckDiskTemplateEnabled(self.cfg.GetClusterInfo(),
1888 instance.disk_template)
1889
1890 CheckNodeOnline(self, instance.primary_node)
1891 CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
1892 msg="cannot rename")
1893 self.instance = instance
1894
1895 self._PerformChecksAndResolveNewName()
1896
1897 if self.op.new_name != instance.name:
1898 CheckInstanceExistence(self, self.op.new_name)
1899
1900 def ExpandNames(self):
1901 self._ExpandAndLockInstance()
1902
1903 # Note that this call might not resolve anything if name checks have been
1904 # disabled in the opcode. In this case, we might have a renaming collision
1905 # if a shortened name and a full name are used simultaneously, as we will
1906 # have two different locks. However, at that point the user has taken away
1907 # the tools necessary to detect this issue.
1908 self._PerformChecksAndResolveNewName()
1909
1910 # Used to prevent instance namespace collisions.
1911 if self.op.new_name != self.op.instance_name:
1912 CheckInstanceExistence(self, self.op.new_name)
1913 self.add_locks[locking.LEVEL_INSTANCE] = self.op.new_name
1914
1915 def Exec(self, feedback_fn):
1916 """Rename the instance.
1917
1918 """
1919 old_name = self.instance.name
1920
1921 rename_file_storage = False
1922 if (self.instance.disk_template in (constants.DT_FILE,
1923 constants.DT_SHARED_FILE) and
1924 self.op.new_name != self.instance.name):
1925 disks = self.cfg.GetInstanceDisks(self.instance.uuid)
1926 old_file_storage_dir = os.path.dirname(disks[0].logical_id[1])
1927 rename_file_storage = True
1928
1929 self.cfg.RenameInstance(self.instance.uuid, self.op.new_name)
1930
1931 # Assert that we have both the locks needed
1932 assert old_name in self.owned_locks(locking.LEVEL_INSTANCE)
1933 assert self.op.new_name in self.owned_locks(locking.LEVEL_INSTANCE)
1934
1935 # re-read the instance from the configuration after rename
1936 renamed_inst = self.cfg.GetInstanceInfo(self.instance.uuid)
1937 disks = self.cfg.GetInstanceDisks(renamed_inst.uuid)
1938
1939 if rename_file_storage:
1940 new_file_storage_dir = os.path.dirname(disks[0].logical_id[1])
1941 result = self.rpc.call_file_storage_dir_rename(renamed_inst.primary_node,
1942 old_file_storage_dir,
1943 new_file_storage_dir)
1944 result.Raise("Could not rename directory '%s' to '%s' on node %s"
1945 " (but the instance has been renamed in Ganeti)" %
1946 (old_file_storage_dir, new_file_storage_dir,
1947 self.cfg.GetNodeName(renamed_inst.primary_node)))
1948
1949 StartInstanceDisks(self, renamed_inst, None)
1950 renamed_inst = self.cfg.GetInstanceInfo(renamed_inst.uuid)
1951
1952 # update info on disks
1953 info = GetInstanceInfoText(renamed_inst)
1954 for (idx, disk) in enumerate(disks):
1955 for node_uuid in self.cfg.GetInstanceNodes(renamed_inst.uuid):
1956 result = self.rpc.call_blockdev_setinfo(node_uuid,
1957 (disk, renamed_inst), info)
1958 result.Warn("Error setting info on node %s for disk %s" %
1959 (self.cfg.GetNodeName(node_uuid), idx), self.LogWarning)
1960 try:
1961 result = self.rpc.call_instance_run_rename(renamed_inst.primary_node,
1962 renamed_inst, old_name,
1963 self.op.debug_level)
1964 result.Warn("Could not run OS rename script for instance %s on node %s"
1965 " (but the instance has been renamed in Ganeti)" %
1966 (renamed_inst.name,
1967 self.cfg.GetNodeName(renamed_inst.primary_node)),
1968 self.LogWarning)
1969 finally:
1970 ShutdownInstanceDisks(self, renamed_inst)
1971
1972 return renamed_inst.name
1973
1974
1975 class LUInstanceRemove(LogicalUnit):
1976 """Remove an instance.
1977
1978 """
1979 HPATH = "instance-remove"
1980 HTYPE = constants.HTYPE_INSTANCE
1981 REQ_BGL = False
1982
1983 def ExpandNames(self):
1984 self._ExpandAndLockInstance()
1985 self.needed_locks[locking.LEVEL_NODE] = []
1986 self.needed_locks[locking.LEVEL_NODE_RES] = []
1987 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
1988 self.dont_collate_locks[locking.LEVEL_NODE] = True
1989 self.dont_collate_locks[locking.LEVEL_NODE_RES] = True
1990
1991 def DeclareLocks(self, level):
1992 if level == locking.LEVEL_NODE:
1993 self._LockInstancesNodes()
1994 elif level == locking.LEVEL_NODE_RES:
1995 # Copy node locks
1996 self.needed_locks[locking.LEVEL_NODE_RES] = \
1997 CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1998
1999 def BuildHooksEnv(self):
2000 """Build hooks env.
2001
2002 This runs on master, primary and secondary nodes of the instance.
2003
2004 """
2005 env = BuildInstanceHookEnvByObject(self, self.instance,
2006 secondary_nodes=self.secondary_nodes,
2007 disks=self.inst_disks)
2008 env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
2009 return env
2010
2011 def BuildHooksNodes(self):
2012 """Build hooks nodes.
2013
2014 """
2015 nl = [self.cfg.GetMasterNode()]
2016 nl_post = list(self.cfg.GetInstanceNodes(self.instance.uuid)) + nl
2017 return (nl, nl_post)
2018
2019 def CheckPrereq(self):
2020 """Check prerequisites.
2021
2022 This checks that the instance is in the cluster.
2023
2024 """
2025 self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
2026 assert self.instance is not None, \
2027 "Cannot retrieve locked instance %s" % self.op.instance_name
2028 self.secondary_nodes = \
2029 self.cfg.GetInstanceSecondaryNodes(self.instance.uuid)
2030 self.inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
2031
2032 def Exec(self, feedback_fn):
2033 """Remove the instance.
2034
2035 """
2036 logging.info("Shutting down instance %s on node %s", self.instance.name,
2037 self.cfg.GetNodeName(self.instance.primary_node))
2038
2039 result = self.rpc.call_instance_shutdown(self.instance.primary_node,
2040 self.instance,
2041 self.op.shutdown_timeout,
2042 self.op.reason)
2043 if self.op.ignore_failures:
2044 result.Warn("Warning: can't shut down instance", feedback_fn)
2045 else:
2046 result.Raise("Could not shutdown instance %s on node %s" %
2047 (self.instance.name,
2048 self.cfg.GetNodeName(self.instance.primary_node)))
2049
2050 assert (self.owned_locks(locking.LEVEL_NODE) ==
2051 self.owned_locks(locking.LEVEL_NODE_RES))
2052 assert not (set(self.cfg.GetInstanceNodes(self.instance.uuid)) -
2053 self.owned_locks(locking.LEVEL_NODE)), \
2054 "Not owning correct locks"
2055
2056 RemoveInstance(self, feedback_fn, self.instance, self.op.ignore_failures)
2057
2058
2059 class LUInstanceMove(LogicalUnit):
2060 """Move an instance by data-copying.
2061
2062 This LU is only used if the instance needs to be moved by copying its data
2063 from one node in the cluster to another. The instance is shut down, its
2064 data is copied to the new node, the configuration change is propagated,
2065 and then the instance is started again.
2066
2067 See also:
2068 L{LUInstanceFailover} for moving an instance on shared storage (no copying
2069 required).
2070
2071 L{LUInstanceMigrate} for the live migration of an instance (no shutdown
2072 required).
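
Example (via the standard command-line wrapper; values are illustrative)::

  gnt-instance move -n node2.example.com instance1.example.com
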
2073 """
2074 HPATH = "instance-move"
2075 HTYPE = constants.HTYPE_INSTANCE
2076 REQ_BGL = False
2077
2078 def ExpandNames(self):
2079 self._ExpandAndLockInstance()
2080 (self.op.target_node_uuid, self.op.target_node) = \
2081 ExpandNodeUuidAndName(self.cfg, self.op.target_node_uuid,
2082 self.op.target_node)
2083 self.needed_locks[locking.LEVEL_NODE] = [self.op.target_node_uuid]
2084 self.needed_locks[locking.LEVEL_NODE_RES] = []
2085 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
2086
2087 def DeclareLocks(self, level):
2088 if level == locking.LEVEL_NODE:
2089 self._LockInstancesNodes(primary_only=True)
2090 elif level == locking.LEVEL_NODE_RES:
2091 # Copy node locks
2092 self.needed_locks[locking.LEVEL_NODE_RES] = \
2093 CopyLockList(self.needed_locks[locking.LEVEL_NODE])
2094
2095 def BuildHooksEnv(self):
2096 """Build hooks env.
2097
2098 This runs on master, primary and target nodes of the instance.
2099
2100 """
2101 env = {
2102 "TARGET_NODE": self.op.target_node,
2103 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
2104 }
2105 env.update(BuildInstanceHookEnvByObject(self, self.instance))
2106 return env
2107
2108 def BuildHooksNodes(self):
2109 """Build hooks nodes.
2110
2111 """
2112 nl = [
2113 self.cfg.GetMasterNode(),
2114 self.instance.primary_node,
2115 self.op.target_node_uuid,
2116 ]
2117 return (nl, nl)
2118
2119 def CheckPrereq(self):
2120 """Check prerequisites.
2121
2122 This checks that the instance is in the cluster.
2123
2124 """
2125 self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
2126 assert self.instance is not None, \
2127 "Cannot retrieve locked instance %s" % self.op.instance_name
2128
2129 disks = self.cfg.GetInstanceDisks(self.instance.uuid)
2130 for idx, dsk in enumerate(disks):
2131 if dsk.dev_type not in constants.DTS_COPYABLE:
2132 raise errors.OpPrereqError("Instance disk %d has disk type %s and is"
2133 " not suitable for copying"
2134 % (idx, dsk.dev_type), errors.ECODE_STATE)
2135
2136 target_node = self.cfg.GetNodeInfo(self.op.target_node_uuid)
2137 assert target_node is not None, \
2138 "Cannot retrieve locked node %s" % self.op.target_node
2139
2140 self.target_node_uuid = target_node.uuid
2141 if target_node.uuid == self.instance.primary_node:
2142 raise errors.OpPrereqError("Instance %s is already on node %s" %
2143 (self.instance.name, target_node.name),
2144 errors.ECODE_STATE)
2145
2146 cluster = self.cfg.GetClusterInfo()
2147 bep = cluster.FillBE(self.instance)
2148
2149 CheckNodeOnline(self, target_node.uuid)
2150 CheckNodeNotDrained(self, target_node.uuid)
2151 CheckNodeVmCapable(self, target_node.uuid)
2152 group_info = self.cfg.GetNodeGroup(target_node.group)
2153 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
2154 CheckTargetNodeIPolicy(self, ipolicy, self.instance, target_node, self.cfg,
2155 ignore=self.op.ignore_ipolicy)
2156
2157 if self.instance.admin_state == constants.ADMINST_UP:
2158 # check memory requirements on the target node
2159 CheckNodeFreeMemory(
2160 self, target_node.uuid, "moving instance %s" %
2161 self.instance.name, bep[constants.BE_MAXMEM],
2162 self.instance.hypervisor,
2163 cluster.hvparams[self.instance.hypervisor])
2164 else:
2165 self.LogInfo("Not checking memory on the target node as the"
2166 " instance will not be started")
2167
2168 # check bridge existence
2169 CheckInstanceBridgesExist(self, self.instance, node_uuid=target_node.uuid)
2170
2171 def Exec(self, feedback_fn):
2172 """Move an instance.
2173
2174 The move is done by shutting it down on its present node, copying
2175 the data over (slow) and starting it on the new node.
2176
2177 """
2178 source_node = self.cfg.GetNodeInfo(self.instance.primary_node)
2179 target_node = self.cfg.GetNodeInfo(self.target_node_uuid)
2180
2181 self.LogInfo("Shutting down instance %s on source node %s",
2182 self.instance.name, source_node.name)
2183
2184 assert (self.owned_locks(locking.LEVEL_NODE) ==
2185 self.owned_locks(locking.LEVEL_NODE_RES))
2186
2187 result = self.rpc.call_instance_shutdown(source_node.uuid, self.instance,
2188 self.op.shutdown_timeout,
2189 self.op.reason)
2190 if self.op.ignore_consistency:
2191 result.Warn("Could not shutdown instance %s on node %s. Proceeding"
2192 " anyway. Please make sure node %s is down. Error details" %
2193 (self.instance.name, source_node.name, source_node.name),
2194 self.LogWarning)
2195 else:
2196 result.Raise("Could not shutdown instance %s on node %s" %
2197 (self.instance.name, source_node.name))
2198
2199 # create the target disks
2200 try:
2201 CreateDisks(self, self.instance, target_node_uuid=target_node.uuid)
2202 except errors.OpExecError:
2203 self.LogWarning("Device creation failed")
2204 self.cfg.ReleaseDRBDMinors(self.instance.uuid)
2205 raise
2206
2207 errs = []
2208 transfers = []
2209 # activate, get path, create transfer jobs
2210 disks = self.cfg.GetInstanceDisks(self.instance.uuid)
2211 for idx, disk in enumerate(disks):
2212 # FIXME: pass debug option from opcode to backend
2213 dt = masterd.instance.DiskTransfer("disk/%s" % idx,
2214 constants.IEIO_RAW_DISK,
2215 (disk, self.instance),
2216 constants.IEIO_RAW_DISK,
2217 (disk, self.instance),
2218 None)
2219 transfers.append(dt)
2220 self.cfg.Update(disk, feedback_fn)
2221
2222 import_result = \
2223 masterd.instance.TransferInstanceData(self, feedback_fn,
2224 source_node.uuid,
2225 target_node.uuid,
2226 target_node.secondary_ip,
2227 self.op.compress,
2228 self.instance, transfers)
2229 if not compat.all(import_result):
2230 errs.append("Failed to transfer instance data")
2231
2232 if errs:
2233 self.LogWarning("Some disks failed to copy, aborting")
2234 try:
2235 RemoveDisks(self, self.instance, target_node_uuid=target_node.uuid)
2236 finally:
2237 self.cfg.ReleaseDRBDMinors(self.instance.uuid)
2238 raise errors.OpExecError("Errors during disk copy: %s" %
2239 (",".join(errs),))
2240
2241 self.instance.primary_node = target_node.uuid
2242 self.cfg.Update(self.instance, feedback_fn)
2243
2244 self.LogInfo("Removing the disks on the original node")
2245 RemoveDisks(self, self.instance, target_node_uuid=source_node.uuid)
2246
2247 # Only start the instance if it's marked as up
2248 if self.instance.admin_state == constants.ADMINST_UP:
2249 self.LogInfo("Starting instance %s on node %s",
2250 self.instance.name, target_node.name)
2251
2252 disks_ok, _ = AssembleInstanceDisks(self, self.instance,
2253 ignore_secondaries=True)
2254 if not disks_ok:
2255 ShutdownInstanceDisks(self, self.instance)
2256 raise errors.OpExecError("Can't activate the instance's disks")
2257
2258 result = self.rpc.call_instance_start(target_node.uuid,
2259 (self.instance, None, None), False,
2260 self.op.reason)
2261 msg = result.fail_msg
2262 if msg:
2263 ShutdownInstanceDisks(self, self.instance)
2264 raise errors.OpExecError("Could not start instance %s on node %s: %s" %
2265 (self.instance.name, target_node.name, msg))
2266
2267
2268 class LUInstanceMultiAlloc(NoHooksLU):
2269 """Allocates multiple instances at the same time.
2270
2271 """
2272 REQ_BGL = False
2273
2274 def CheckArguments(self):
2275 """Check arguments.
2276
2277 """
2278 nodes = []
2279 for inst in self.op.instances:
2280 if inst.iallocator is not None:
2281 raise errors.OpPrereqError("An iallocator is not allowed to be set"
2282 " on instance objects", errors.ECODE_INVAL)
2283 nodes.append(bool(inst.pnode))
2284 if inst.disk_template in constants.DTS_INT_MIRROR:
2285 nodes.append(bool(inst.snode))
2286
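# `nodes` holds one boolean per node specification that had to be
# provided; either every instance supplies its nodes or none does, so
# all() XOR any() flags a mixed specification.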
2287 has_nodes = compat.any(nodes)
2288 if compat.all(nodes) ^ has_nodes:
2289 raise errors.OpPrereqError("There are instance objects providing"
2290 " pnode/snode while others do not",
2291 errors.ECODE_INVAL)
2292
2293 if not has_nodes and self.op.iallocator is None:
2294 default_iallocator = self.cfg.GetDefaultIAllocator()
2295 if default_iallocator:
2296 self.op.iallocator = default_iallocator
2297 else:
2298 raise errors.OpPrereqError("No iallocator or nodes on the instances"
2299 " given and no cluster-wide default"
2300 " iallocator found; please specify either"
2301 " an iallocator or nodes on the instances"
2302 " or set a cluster-wide default iallocator",
2303 errors.ECODE_INVAL)
2304
2305 _CheckOpportunisticLocking(self.op)
2306
2307 dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
2308 if dups:
2309 raise errors.OpPrereqError("There are duplicate instance names: %s" %
2310 utils.CommaJoin(dups), errors.ECODE_INVAL)
2311
2312 def ExpandNames(self):
2313 """Calculate the locks.
2314
2315 """
2316 self.share_locks = ShareAll()
2317 self.needed_locks = {
2318 # iallocator will select nodes and even if no iallocator is used,
2319 # collisions with LUInstanceCreate should be avoided
2320 locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
2321 }
2322
2323 if self.op.iallocator:
2324 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
2325 self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
2326
2327 if self.op.opportunistic_locking:
2328 self.opportunistic_locks[locking.LEVEL_NODE] = True
2329 self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
2330 else:
2331 nodeslist = []
2332 for inst in self.op.instances:
2333 (inst.pnode_uuid, inst.pnode) = \
2334 ExpandNodeUuidAndName(self.cfg, inst.pnode_uuid, inst.pnode)
2335 nodeslist.append(inst.pnode_uuid)
2336 if inst.snode is not None:
2337 (inst.snode_uuid, inst.snode) = \
2338 ExpandNodeUuidAndName(self.cfg, inst.snode_uuid, inst.snode)
2339 nodeslist.append(inst.snode_uuid)
2340
2341 self.needed_locks[locking.LEVEL_NODE] = nodeslist
2342 # Lock resources of instance's primary and secondary nodes (copy to
2343 # prevent accidental modification)
2344 self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)
2345
2346 def CheckPrereq(self):
2347 """Check prerequisite.
2348
2349 """
2350 if self.op.iallocator:
2351 cluster = self.cfg.GetClusterInfo()
2352 default_vg = self.cfg.GetVGName()
2353 ec_id = self.proc.GetECId()
2354
2355 if self.op.opportunistic_locking:
2356 # Only consider nodes for which a lock is held
2357 node_whitelist = self.cfg.GetNodeNames(
2358 set(self.owned_locks(locking.LEVEL_NODE)) &
2359 set(self.owned_locks(locking.LEVEL_NODE_RES)))
2360 else:
2361 node_whitelist = None
2362
2363 insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op.disks,
2364 op.disk_template,
2365 default_vg),
2366 _ComputeNics(op, cluster, None,
2367 self.cfg, ec_id),
2368 _ComputeFullBeParams(op, cluster),
2369 node_whitelist)
2370 for op in self.op.instances]
2371
2372 req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
2373 ial = iallocator.IAllocator(self.cfg, self.rpc, req)
2374
2375 ial.Run(self.op.iallocator)
2376
2377 if not ial.success:
2378 raise errors.OpPrereqError("Can't compute nodes using"
2379 " iallocator '%s': %s" %
2380 (self.op.iallocator, ial.info),
2381 errors.ECODE_NORES)
2382
2383 self.ia_result = ial.result
2384
2385 if self.op.dry_run:
2386 self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
2387 constants.JOB_IDS_KEY: [],
2388 })
2389
2390 def _ConstructPartialResult(self):
2391 """Contructs the partial result.
2392
2393 """
2394 if self.op.iallocator:
2395 (allocatable, failed_insts) = self.ia_result
2396 allocatable_insts = map(compat.fst, allocatable)
2397 else:
2398 allocatable_insts = [op.instance_name for op in self.op.instances]
2399 failed_insts = []
2400
2401 return {
2402 constants.ALLOCATABLE_KEY: allocatable_insts,
2403 constants.FAILED_KEY: failed_insts,
2404 }
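
# The returned dict is shaped like, e.g. (illustrative values):
#   {constants.ALLOCATABLE_KEY: ["inst1.example.com"],
#    constants.FAILED_KEY: ["inst2.example.com"]}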
2405
2406 def Exec(self, feedback_fn):
2407 """Executes the opcode.
2408
2409 """
2410 jobs = []
2411 if self.op.iallocator:
2412 op2inst = dict((op.instance_name, op) for op in self.op.instances)
2413 (allocatable, failed) = self.ia_result
2414
2415 for (name, node_names) in allocatable:
2416 op = op2inst.pop(name)
2417
2418 (op.pnode_uuid, op.pnode) = \
2419 ExpandNodeUuidAndName(self.cfg, None, node_names[0])
2420 if len(node_names) > 1:
2421 (op.snode_uuid, op.snode) = \
2422 ExpandNodeUuidAndName(self.cfg, None, node_names[1])
2423
2424 jobs.append([op])
2425
2426 missing = set(op2inst.keys()) - set(failed)
2427 assert not missing, \
2428 "Iallocator did return incomplete result: %s" % \
2429 utils.CommaJoin(missing)
2430 else:
2431 jobs.extend([op] for op in self.op.instances)
2432
2433 return ResultWithJobs(jobs, **self._ConstructPartialResult())
2434
2435
2436 class _InstNicModPrivate(object):
2437 """Data structure for network interface modifications.
2438
2439 Used by L{LUInstanceSetParams}.
2440
2441 """
2442 def __init__(self):
2443 self.params = None
2444 self.filled = None
2445
2446
2447 def _PrepareContainerMods(mods, private_fn):
2448 """Prepares a list of container modifications by adding a private data field.
2449
2450 @type mods: list of tuples; (operation, index, parameters)
2451 @param mods: List of modifications
2452 @type private_fn: callable or None
2453 @param private_fn: Callable for constructing a private data field for a
2454 modification
2455 @rtype: list
2456
2457 """
2458 if private_fn is None:
2459 fn = lambda: None
2460 else:
2461 fn = private_fn
2462
2463 return [(op, idx, params, fn()) for (op, idx, params) in mods]
2464
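# Illustrative sketch (hypothetical values): given
#   mods = [(constants.DDM_MODIFY, "eth0", {"link": "br1"})]
# _PrepareContainerMods(mods, _InstNicModPrivate) returns
#   [(constants.DDM_MODIFY, "eth0", {"link": "br1"}, <_InstNicModPrivate>)]
# i.e. each 3-tuple is extended with a freshly constructed private object.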
2465
2466 def _CheckNodesPhysicalCPUs(lu, node_uuids, requested, hypervisor_specs):
2467 """Checks if nodes have enough physical CPUs
2468
2469 This function checks if all given nodes have the needed number of
2470 physical CPUs. In case any node has less CPUs or we cannot get the
2471 information from the node, this function raises an OpPrereqError
2472 exception.
2473
2474 @type lu: C{LogicalUnit}
2475 @param lu: a logical unit from which we get configuration data
2476 @type node_uuids: C{list}
2477 @param node_uuids: the list of node UUIDs to check
2478 @type requested: C{int}
2479 @param requested: the minimum acceptable number of physical CPUs
2480 @type hypervisor_specs: list of pairs (string, dict of strings)
2481 @param hypervisor_specs: list of hypervisor specifications in
2482 pairs (hypervisor_name, hvparams)
2483 @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
2484 or we cannot check the node
2485
2486 """
2487 nodeinfo = lu.rpc.call_node_info(node_uuids, None, hypervisor_specs)
2488 for node_uuid in node_uuids:
2489 info = nodeinfo[node_uuid]
2490 node_name = lu.cfg.GetNodeName(node_uuid)
2491 info.Raise("Cannot get current information from node %s" % node_name,
2492 prereq=True, ecode=errors.ECODE_ENVIRON)
2493 (_, _, (hv_info, )) = info.payload
2494 num_cpus = hv_info.get("cpu_total", None)
2495 if not isinstance(num_cpus, int):
2496 raise errors.OpPrereqError("Can't compute the number of physical CPUs"
2497 " on node %s, result was '%s'" %
2498 (node_name, num_cpus), errors.ECODE_ENVIRON)
2499 if requested > num_cpus:
2500 raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
2501 "required" % (node_name, num_cpus, requested),
2502 errors.ECODE_NORES)
2503
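# Hypothetical usage sketch (names are illustrative): require at least
# four physical CPUs on both candidate nodes for the instance's
# hypervisor:
#
#   hv_specs = [(instance.hypervisor,
#                cluster.hvparams[instance.hypervisor])]
#   _CheckNodesPhysicalCPUs(self, [pnode_uuid, snode_uuid], 4, hv_specs)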
2504
2505 def GetItemFromContainer(identifier, kind, container):
2506 """Return the item refered by the identifier.
2507
2508 @type identifier: string
2509 @param identifier: Item index or name or UUID
2510 @type kind: string
2511 @param kind: One-word item description
2512 @type container: list
2513 @param container: Container to get the item from
2514
2515 """
2516 # Index
2517 try:
2518 idx = int(identifier)
2519 if idx == -1:
2520 # Append
2521 absidx = len(container) - 1
2522 elif idx < 0:
2523 raise IndexError("Not accepting negative indices other than -1")
2524 elif idx >= len(container):
2525 raise IndexError("Got %s index %s, but there are only %s" %
2526 (kind, idx, len(container)))
2527 else:
2528 absidx = idx
2529 return (absidx, container[absidx])
2530 except ValueError:
2531 pass
2532
2533 for idx, item in enumerate(container):
2534 if item.uuid == identifier or item.name == identifier:
2535 return (idx, item)
2536
2537 raise errors.OpPrereqError("Cannot find %s with identifier %s" %
2538 (kind, identifier), errors.ECODE_NOENT)
2539
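# Sketch with a hypothetical container `nics` of two named NICs:
#   GetItemFromContainer("0", "nic", nics)    -> (0, nics[0])   # by index
#   GetItemFromContainer("-1", "nic", nics)   -> (1, nics[-1])  # last item
#   GetItemFromContainer("eth0", "nic", nics) -> matched by name (or UUID)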
2540
2541 def _ApplyContainerMods(kind, container, chgdesc, mods,
2542 create_fn, modify_fn, remove_fn,
2543 post_add_fn=None):
2544 """Applies descriptions in C{mods} to C{container}.
2545
2546 @type kind: string
2547 @param kind: One-word item description
2548 @type container: list
2549 @param container: Container to modify
2550 @type chgdesc: None or list
2551 @param chgdesc: List of applied changes
2552 @type mods: list
2553 @param mods: Modifications as returned by L{_PrepareContainerMods}
2554 @type create_fn: callable
2555 @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
2556 receives absolute item index, parameters and private data object as added
2557 by L{_PrepareContainerMods}, returns tuple containing new item and changes
2558 as list
2559 @type modify_fn: callable
2560 @param modify_fn: Callback for modifying an existing item
2561 (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
2562 and private data object as added by L{_PrepareContainerMods}, returns
2563 changes as list
2564 @type remove_fn: callable
2565 @param remove_fn: Callback on removing item; receives absolute item index,
2566 item and private data object as added by L{_PrepareContainerMods}
2567 @type post_add_fn: callable
2568 @param post_add_fn: Callable for post-processing a newly created item after
2569 it has been put into the container. It receives the index of the new item
2570 and the new item as parameters.
2571
2572 """
2573 for (op, identifier, params, private) in mods:
2574 changes = None
2575
2576 if op == constants.DDM_ADD:
2577 # Calculate where item will be added
2578 # When adding an item, identifier can only be an index
2579 try:
2580 idx = int(identifier)
2581 except ValueError:
2582 raise errors.OpPrereqError("Only a non-negative integer or -1 is"
2583 " accepted as identifier for %s" % constants.DDM_ADD,
2584 errors.ECODE_INVAL)
2585 if idx == -1:
2586 addidx = len(container)
2587 else:
2588 if idx < 0:
2589 raise IndexError("Not accepting negative indices other than -1")
2590 elif idx > len(container):
2591 raise IndexError("Got %s index %s, but there are only %s" %
2592 (kind, idx, len(container)))
2593 addidx = idx
2594
2595 if create_fn is None:
2596 item = params
2597 else:
2598 (item, changes) = create_fn(addidx, params, private)
2599
2600 if idx == -1:
2601 container.append(item)
2602 else:
2603 assert idx >= 0
2604 assert idx <= len(container)
2605 # list.insert does so before the specified index
2606 container.insert(idx, item)
2607
2608 if post_add_fn is not None:
2609 post_add_fn(addidx, item)
2610
2611 else:
2612 # Retrieve existing item
2613 (absidx, item) = GetItemFromContainer(identifier, kind, container)
2614
2615 if op == constants.DDM_REMOVE:
2616 assert not params
2617
2618 changes = [("%s/%s" % (kind, absidx), "remove")]
2619
2620 if remove_fn is not None:
2621 msg = remove_fn(absidx, item, private)
2622 if msg:
2623 changes.append(("%s/%s" % (kind, absidx), msg))
2624
2625 assert container[absidx] == item
2626 del container[absidx]
2627 elif op == constants.DDM_MODIFY:
2628 if modify_fn is not None:
2629 changes = modify_fn(absidx, item, params, private)
2630 else:
2631 raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2632
2633 assert _TApplyContModsCbChanges(changes)
2634
2635 if not (chgdesc is None or changes is None):
2636 chgdesc.extend(changes)
2637
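# Minimal illustrative call (no callbacks; values are hypothetical):
# removing the second NIC while recording the change description:
#
#   chgdesc = []
#   mods = _PrepareContainerMods([(constants.DDM_REMOVE, "1", {})], None)
#   _ApplyContainerMods("nic", nics, chgdesc, mods, None, None, None)
#   # chgdesc == [("nic/1", "remove")]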
2638
2639 class LUInstanceSetParams(LogicalUnit):
2640 """Modifies an instances's parameters.
2641
2642 """
2643 HPATH = "instance-modify"
2644 HTYPE = constants.HTYPE_INSTANCE
2645 REQ_BGL = False
2646
2647 @staticmethod
2648 def _UpgradeDiskNicMods(kind, mods, verify_fn):
2649 assert ht.TList(mods)
2650 assert not mods or len(mods[0]) in (2, 3)
2651
2652 if mods and len(mods[0]) == 2:
2653 result = []
2654
2655 addremove = 0
2656 for op, params in mods:
2657 if op in (constants.DDM_ADD, constants.DDM_REMOVE):
2658 result.append((op, -1, params))
2659 addremove += 1
2660
2661 if addremove > 1:
2662 raise errors.OpPrereqError("Only one %s add or remove operation is"
2663 " supported at a time" % kind,
2664 errors.ECODE_INVAL)
2665 else:
2666 result.append((constants.DDM_MODIFY, op, params))
2667
2668 assert verify_fn(result)
2669 else:
2670 result = mods
2671
2672 return result
2673
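# For example (hypothetical values), the legacy 2-tuple syntax
#   [(constants.DDM_ADD, {"size": 1024})]
# is upgraded to
#   [(constants.DDM_ADD, -1, {"size": 1024})]
# while a bare modification such as [("0", {"mode": "rw"})] becomes
#   [(constants.DDM_MODIFY, "0", {"mode": "rw"})]
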
2674 @staticmethod
2675 def _CheckMods(kind, mods, key_types, item_fn):
2676 """Ensures requested disk/NIC modifications are valid.
2677
2678 """
2679 for (op, _, params) in mods:
2680 assert ht.TDict(params)
2681
2682 # If 'key_types' is an empty dict, we assume we have an
2683 # 'ext' template and thus do not ForceDictType
2684 if key_types:
2685 utils.ForceDictType(params, key_types)
2686
2687 if op == constants.DDM_REMOVE:
2688 if params:
2689 raise errors.OpPrereqError("No settings should be passed when"
2690 " removing a %s" % kind,
2691 errors.ECODE_INVAL)
2692 elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
2693 item_fn(op, params)
2694 else:
2695 raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2696
2697 def _VerifyDiskModification(self, op, params, excl_stor, group_access_type):
2698 """Verifies a disk modification.
2699
2700 """
2701 if op == constants.DDM_ADD:
2702 mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
2703 if mode not in constants.DISK_ACCESS_SET:
2704 raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
2705 errors.ECODE_INVAL)
2706
2707 size = params.get(constants.IDISK_SIZE, None)
2708 if size is None:
2709 raise errors.OpPrereqError("Required disk parameter '%s' missing" %
2710 constants.IDISK_SIZE, errors.ECODE_INVAL)
2711 size = int(size)
2712
2713 params[constants.IDISK_SIZE] = size
2714 name = params.get(constants.IDISK_NAME, None)
2715 if name is not None and name.lower() == constants.VALUE_NONE:
2716 params[constants.IDISK_NAME] = None
2717
2718 CheckSpindlesExclusiveStorage(params, excl_stor, True)
2719
2720 # Check disk access param (only for specific disks)
2721 if self.instance.disk_template in constants.DTS_HAVE_ACCESS:
2722 access_type = params.get(constants.IDISK_ACCESS, group_access_type)
2723 if not IsValidDiskAccessModeCombination(self.instance.hypervisor,
2724 self.instance.disk_template,
2725 access_type):
2726 raise errors.OpPrereqError("Selected hypervisor (%s) cannot be"
2727 " used with %s disk access param" %
2728 (self.instance.hypervisor, access_type),
2729 errors.ECODE_STATE)
2730
2731 elif op == constants.DDM_MODIFY:
2732 if constants.IDISK_SIZE in params:
2733 raise errors.OpPrereqError("Disk size change not possible, use"
2734 " grow-disk", errors.ECODE_INVAL)
2735
2736 # Disk modification supports changing only the disk name and mode.
2737 # Changing arbitrary parameters is allowed only for the ext disk template.
2738 if self.instance.disk_template != constants.DT_EXT:
2739 utils.ForceDictType(params, constants.MODIFIABLE_IDISK_PARAMS_TYPES)
2740 else:
2741 # We have to check that the 'access' parameter cannot be modified
2742 if constants.IDISK_ACCESS in params:
2743 raise errors.OpPrereqError("Disk 'access' parameter change is"
2744 " not possible", errors.ECODE_INVAL)
2745
2746 name = params.get(constants.IDISK_NAME, None)
2747 if name is not None and name.lower() == constants.VALUE_NONE:
2748 params[constants.IDISK_NAME] = None
2749
2750 @staticmethod
2751 def _VerifyNicModification(op, params):
2752 """Verifies a network interface modification.
2753
2754 """
2755 if op in (constants.DDM_ADD, constants.DDM_MODIFY):
2756 ip = params.get(constants.INIC_IP, None)
2757 name = params.get(constants.INIC_NAME, None)
2758 req_net = params.get(constants.INIC_NETWORK, None)
2759 link = params.get(constants.NIC_LINK, None)
2760 mode = params.get(constants.NIC_MODE, None)
2761 if name is not None and name.lower() == constants.VALUE_NONE:
2762 params[constants.INIC_NAME] = None
2763 if req_net is not None:
2764 if req_net.lower() == constants.VALUE_NONE:
2765 params[constants.INIC_NETWORK] = None
2766 req_net = None
2767 elif link is not None or mode is not None:
2768 raise errors.OpPrereqError("If a network is given, mode or link"
2769 " should not be set",
2770 errors.ECODE_INVAL)
2771
2772 if op == constants.DDM_ADD:
2773 macaddr = params.get(constants.INIC_MAC, None)
2774 if macaddr is None:
2775 params[constants.INIC_MAC] = constants.VALUE_AUTO
2776
2777 if ip is not None:
2778 if ip.lower() == constants.VALUE_NONE:
2779 params[constants.INIC_IP] = None
2780 else:
2781 if ip.lower() == constants.NIC_IP_POOL:
2782 if op == constants.DDM_ADD and req_net is None:
2783 raise errors.OpPrereqError("If ip=pool, parameter network"
2784 " cannot be none",
2785 errors.ECODE_INVAL)
2786 else:
2787 if not netutils.IPAddress.IsValid(ip):
2788 raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
2789 errors.ECODE_INVAL)
2790
2791 if constants.INIC_MAC in params:
2792 macaddr = params[constants.INIC_MAC]
2793 if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2794 macaddr = utils.NormalizeAndValidateMac(macaddr)
2795
2796 if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
2797 raise errors.OpPrereqError("'auto' is not a valid MAC address when"
2798 " modifying an existing NIC",
2799 errors.ECODE_INVAL)
2800
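# For instance (hypothetical values), adding a NIC attached to a network
# with an IP allocated from its pool would pass params such as
#   {constants.INIC_NETWORK: "mynet", constants.INIC_IP: constants.NIC_IP_POOL}
# whereas combining 'network' with 'link' or 'mode' is rejected above,
# since those settings then come from the network's netparams.
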
2801 def CheckArguments(self):
2802 if not (self.op.nics or self.op.disks or self.op.disk_template or
2803 self.op.hvparams or self.op.beparams or self.op.os_name or
2804 self.op.osparams or self.op.offline is not None or
2805 self.op.runtime_mem or self.op.pnode or self.op.osparams_private or
2806 self.op.instance_communication is not None):
2807 raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
2808
2809 if self.op.hvparams:
2810 CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
2811 "hypervisor", "instance", "cluster")
2812
2813 self.op.disks = self._UpgradeDiskNicMods(
2814 "disk", self.op.disks, ht.TSetParamsMods(ht.TIDiskParams))
2815 self.op.nics = self._UpgradeDiskNicMods(
2816 "NIC", self.op.nics, ht.TSetParamsMods(ht.TINicParams))
2817
2818 # Check disk template modifications
2819 if self.op.disk_template:
2820 if self.op.disks:
2821 raise errors.OpPrereqError("Disk template conversion and other disk"
2822 " changes not supported at the same time",
2823 errors.ECODE_INVAL)
2824
2825 # mirrored template node checks
2826 if self.op.disk_template in constants.DTS_INT_MIRROR:
2827 if not self.op.remote_node:
2828 raise errors.OpPrereqError("Changing the disk template to a mirrored"
2829 " one requires specifying a secondary"
2830 " node", errors.ECODE_INVAL)
2831 elif self.op.remote_node:
2832 self.LogWarning("Changing the disk template to a non-mirrored one,"
2833 " the secondary node will be ignored")
2834 # the secondary node must be cleared in order to be ignored, otherwise
2835 # the operation will fail in the GenerateDiskTemplate method
2836 self.op.remote_node = None
2837
2838 # file-based template checks
2839 if self.op.disk_template in constants.DTS_FILEBASED:
2840 if not self.op.file_driver:
2841 self.op.file_driver = constants.FD_DEFAULT
2842 elif self.op.file_driver not in constants.FILE_DRIVER:
2843 raise errors.OpPrereqError("Invalid file driver name '%s'" %
2844 self.op.file_driver, errors.ECODE_INVAL)
2845
2846 # Check NIC modifications
2847 self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
2848 self._VerifyNicModification)
2849
2850 if self.op.pnode:
2851 (self.op.pnode_uuid, self.op.pnode) = \
2852 ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
2853
2854 def ExpandNames(self):
2855 self._ExpandAndLockInstance()
2856 self.needed_locks[locking.LEVEL_NODEGROUP] = []
2857 # Can't even acquire node locks in shared mode as upcoming changes in
2858 # Ganeti 2.6 will start to modify the node object on disk conversion
2859 self.needed_locks[locking.LEVEL_NODE] = []
2860 self.needed_locks[locking.LEVEL_NODE_RES] = []
2861 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2862 # Lock the node group (shared) in order to look up the ipolicy
2863 self.share_locks[locking.LEVEL_NODEGROUP] = 1
2864 self.dont_collate_locks[locking.LEVEL_NODEGROUP] = True
2865 self.dont_collate_locks[locking.LEVEL_NODE] = True
2866 self.dont_collate_locks[locking.LEVEL_NODE_RES] = True
2867
2868 def DeclareLocks(self, level):
2869 if level == locking.LEVEL_NODEGROUP:
2870 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
2871 # Acquire locks for the instance's nodegroups optimistically. Needs
2872 # to be verified in CheckPrereq
2873 self.needed_locks[locking.LEVEL_NODEGROUP] = \
2874 self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
2875 elif level == locking.LEVEL_NODE:
2876 self._LockInstancesNodes()
2877 if self.op.disk_template and self.op.remote_node:
2878 (self.op.remote_node_uuid, self.op.remote_node) = \
2879 ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
2880 self.op.remote_node)
2881 self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node_uuid)
2882 elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
2883 # Copy node locks
2884 self.needed_locks[locking.LEVEL_NODE_RES] = \
2885 CopyLockList(self.needed_locks[locking.LEVEL_NODE])
2886
2887 def BuildHooksEnv(self):
2888 """Build hooks env.
2889
2890 This runs on the master, primary and secondaries.
2891
2892 """
2893 args = {}
2894 if constants.BE_MINMEM in self.be_new:
2895 args["minmem"] = self.be_new[constants.BE_MINMEM]
2896 if constants.BE_MAXMEM in self.be_new:
2897 args["maxmem"] = self.be_new[constants.BE_MAXMEM]
2898 if constants.BE_VCPUS in self.be_new:
2899 args["vcpus"] = self.be_new[constants.BE_VCPUS]
2900 # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
2901 # information at all.
2902
2903 if self._new_nics is not None:
2904 nics = []
2905
2906 for nic in self._new_nics:
2907 n = copy.deepcopy(nic)
2908 nicparams = self.cluster.SimpleFillNIC(n.nicparams)
2909 n.nicparams = nicparams
2910 nics.append(NICToTuple(self, n))
2911
2912 args["nics"] = nics
2913
2914 env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
2915 if self.op.disk_template:
2916 env["NEW_DISK_TEMPLATE"] = self.op.disk_template
2917 if self.op.runtime_mem:
2918 env["RUNTIME_MEMORY"] = self.op.runtime_mem
2919
2920 return env
2921
2922 def BuildHooksNodes(self):
2923 """Build hooks nodes.
2924
2925 """
2926 nl = [self.cfg.GetMasterNode()] + \
2927 list(self.cfg.GetInstanceNodes(self.instance.uuid))
2928 return (nl, nl)
2929
2930 def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
2931 old_params, cluster, pnode_uuid):
2932
2933 update_params_dict = dict([(key, params[key])
2934 for key in constants.NICS_PARAMETERS
2935 if key in params])
2936
2937 req_link = update_params_dict.get(constants.NIC_LINK, None)
2938 req_mode = update_params_dict.get(constants.NIC_MODE, None)
2939
2940 new_net_uuid = None
2941 new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
2942 if new_net_uuid_or_name:
2943 new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
2944 new_net_obj = self.cfg.GetNetwork(new_net_uuid)
2945
2946 if old_net_uuid:
2947 old_net_obj = self.cfg.GetNetwork(old_net_uuid)
2948
2949 if new_net_uuid:
2950 netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode_uuid)
2951 if not netparams:
2952 raise errors.OpPrereqError("No netparams found for the network"
2953 " %s, probably not connected" %
2954 new_net_obj.name, errors.ECODE_INVAL)
2955 new_params = dict(netparams)
2956 else:
2957 new_params = GetUpdatedParams(old_params, update_params_dict)
2958
2959 utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)
2960
2961 new_filled_params = cluster.SimpleFillNIC(new_params)
2962 objects.NIC.CheckParameterSyntax(new_filled_params)
2963
2964 new_mode = new_filled_params[constants.NIC_MODE]
2965 if new_mode == constants.NIC_MODE_BRIDGED:
2966 bridge = new_filled_params[constants.NIC_LINK]
2967 msg = self.rpc.call_bridges_exist(pnode_uuid, [bridge]).fail_msg
2968 if msg:
2969 msg = "Error checking bridges on node '%s': %s" % \
2970 (self.cfg.GetNodeName(pnode_uuid), msg)
2971 if self.op.force:
2972 self.warn.append(msg)
2973 else:
2974 raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
2975
2976 elif new_mode == constants.NIC_MODE_ROUTED:
2977 ip = params.get(constants.INIC_IP, old_ip)
2978 if ip is None and not new_net_uuid:
2979 raise errors.OpPrereqError("Cannot set the NIC IP address to None"
2980 " on a routed NIC if not attached to a"
2981 " network", errors.ECODE_INVAL)
2982
2983 elif new_mode == constants.NIC_MODE_OVS:
2984 # TODO: check OVS link
2985 self.LogInfo("OVS links are currently not checked for correctness")
2986
2987 if constants.INIC_MAC in params:
2988 mac = params[constants.INIC_MAC]
2989 if mac is None:
2990 raise errors.OpPrereqError("Cannot unset the NIC MAC address",
2991 errors.ECODE_INVAL)
2992 elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2993 # otherwise generate the MAC address
2994 params[constants.INIC_MAC] = \
2995 self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2996 else:
2997 # or validate/reserve the current one
2998 try:
2999 self.cfg.ReserveMAC(mac, self.proc.GetECId())
3000 except errors.ReservationError:
3001 raise errors.OpPrereqError("MAC address '%s' already in use"
3002 " in cluster" % mac,
3003 errors.ECODE_NOTUNIQUE)
3004 elif new_net_uuid != old_net_uuid:
3005
3006 def get_net_prefix(net_uuid):
3007 mac_prefix = None
3008 if net_uuid:
3009 nobj = self.cfg.GetNetwork(net_uuid)
3010 mac_prefix = nobj.mac_prefix
3011
3012 return mac_prefix
3013
3014 new_prefix = get_net_prefix(new_net_uuid)
3015 old_prefix = get_net_prefix(old_net_uuid)
3016 if old_prefix != new_prefix: