lib/cmdlib/instance.py
1 #
2 #
3
4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
5 #
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
10 #
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
15 #
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 # 02110-1301, USA.
20
21
22 """Logical units dealing with instances."""
23
24 import OpenSSL
25 import copy
26 import logging
27 import os
28
29 from ganeti import compat
30 from ganeti import constants
31 from ganeti import errors
32 from ganeti import ht
33 from ganeti import hypervisor
34 from ganeti import locking
35 from ganeti.masterd import iallocator
36 from ganeti import masterd
37 from ganeti import netutils
38 from ganeti import objects
39 from ganeti import opcodes
40 from ganeti import pathutils
41 from ganeti import rpc
42 from ganeti import utils
43
44 from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs
45
46 from ganeti.cmdlib.common import INSTANCE_DOWN, \
47 INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
48 ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
49 LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
50 IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
51 AnnotateDiskParams, GetUpdatedParams, ExpandInstanceName, \
52 ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeName
53 from ganeti.cmdlib.instance_storage import CreateDisks, \
54 CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
55 IsExclusiveStorageEnabledNodeName, CreateSingleBlockDev, ComputeDisks, \
56 CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
57 StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks
58 from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
59 GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
60 NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
61 ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
62 GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
63 CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS
64
65 import ganeti.masterd.instance
66
67
68 #: Type description for changes as returned by L{_ApplyContainerMods}'s
69 #: callbacks
70 _TApplyContModsCbChanges = \
71 ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
72 ht.TNonEmptyString,
73 ht.TAny,
74 ])))
75
76
77 def _CheckHostnameSane(lu, name):
78 """Ensures that a given hostname resolves to a 'sane' name.
79
80 The given name is required to be a prefix of the resolved hostname,
81 to prevent accidental mismatches.
82
83 @param lu: the logical unit on behalf of which we're checking
84 @param name: the name we should resolve and check
85 @return: the resolved hostname object
86
87 """
88 hostname = netutils.GetHostname(name=name)
89 if hostname.name != name:
90 lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
91 if not utils.MatchNameComponent(name, [hostname.name]):
92 raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
93 " same as given hostname '%s'") %
94 (hostname.name, name), errors.ECODE_INVAL)
95 return hostname
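# Illustrative sketch (not part of the upstream module; hostnames are
# hypothetical): the given name must be a prefix component of whatever the
# resolver returns, so for name="web1":
#   _CheckHostnameSane(lu, "web1")
#   # resolver returns "web1.example.com"  -> hostname object is returned
#   # resolver returns "web01.example.com" -> OpPrereqError (ECODE_INVAL)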
96
97
98 def _CheckOpportunisticLocking(op):
99 """Generate error if opportunistic locking is not possible.
100
101 """
102 if op.opportunistic_locking and not op.iallocator:
103 raise errors.OpPrereqError("Opportunistic locking is only available in"
104 " combination with an instance allocator",
105 errors.ECODE_INVAL)
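# Illustrative note (addition, not upstream code): opportunistic locking only
# makes sense when an iallocator chooses the nodes, so an opcode with
# opportunistic_locking=True and iallocator=None fails this check with
# ECODE_INVAL before any locks are taken.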
106
107
108 def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_whitelist):
109 """Wrapper around IAReqInstanceAlloc.
110
111 @param op: The instance opcode
112 @param disks: The computed disks
113 @param nics: The computed nics
114 @param beparams: The fully filled beparams
115 @param node_whitelist: List of nodes which should appear as online to the
116 allocator (unless the node is already marked offline)
117
118 @returns: A filled L{iallocator.IAReqInstanceAlloc}
119
120 """
121 spindle_use = beparams[constants.BE_SPINDLE_USE]
122 return iallocator.IAReqInstanceAlloc(name=op.instance_name,
123 disk_template=op.disk_template,
124 tags=op.tags,
125 os=op.os_type,
126 vcpus=beparams[constants.BE_VCPUS],
127 memory=beparams[constants.BE_MAXMEM],
128 spindle_use=spindle_use,
129 disks=disks,
130 nics=[n.ToDict() for n in nics],
131 hypervisor=op.hypervisor,
132 node_whitelist=node_whitelist)
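# Illustrative sketch (addition, mirroring how _RunAllocator uses this
# helper): the request is built from the opcode and the already-filled
# beparams, so the memory requested from the allocator is BE_MAXMEM:
#   req = _CreateInstanceAllocRequest(op, disks, nics, be_full,
#                                     node_whitelist=None)
#   ial = iallocator.IAllocator(cfg, rpc, req)
#   ial.Run(op.iallocator)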
133
134
135 def _ComputeFullBeParams(op, cluster):
136 """Computes the full beparams.
137
138 @param op: The instance opcode
139 @param cluster: The cluster config object
140
141 @return: The fully filled beparams
142
143 """
144 default_beparams = cluster.beparams[constants.PP_DEFAULT]
145 for param, value in op.beparams.iteritems():
146 if value == constants.VALUE_AUTO:
147 op.beparams[param] = default_beparams[param]
148 objects.UpgradeBeParams(op.beparams)
149 utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
150 return cluster.SimpleFillBE(op.beparams)
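# Illustrative sketch (hypothetical values): any beparam set to
# constants.VALUE_AUTO is first replaced by the cluster default, then the
# dict is upgraded (legacy "memory" becomes maxmem/minmem) and filled:
#   op.beparams = {constants.BE_VCPUS: constants.VALUE_AUTO}
#   be_full = _ComputeFullBeParams(op, cluster)
#   # be_full now contains every backend parameter, with VCPUS taken from
#   # the cluster defaults.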
151
152
153 def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
154 """Computes the nics.
155
156 @param op: The instance opcode
157 @param cluster: Cluster configuration object
158 @param default_ip: The default ip to assign
159 @param cfg: An instance of the configuration object
160 @param ec_id: Execution context ID
161
162 @returns: The built-up NIC objects
163
164 """
165 nics = []
166 for nic in op.nics:
167 nic_mode_req = nic.get(constants.INIC_MODE, None)
168 nic_mode = nic_mode_req
169 if nic_mode is None or nic_mode == constants.VALUE_AUTO:
170 nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
171
172 net = nic.get(constants.INIC_NETWORK, None)
173 link = nic.get(constants.NIC_LINK, None)
174 ip = nic.get(constants.INIC_IP, None)
175
176 if net is None or net.lower() == constants.VALUE_NONE:
177 net = None
178 else:
179 if nic_mode_req is not None or link is not None:
180 raise errors.OpPrereqError("If network is given, no mode or link"
181 " is allowed to be passed",
182 errors.ECODE_INVAL)
183
184 # ip validity checks
185 if ip is None or ip.lower() == constants.VALUE_NONE:
186 nic_ip = None
187 elif ip.lower() == constants.VALUE_AUTO:
188 if not op.name_check:
189 raise errors.OpPrereqError("IP address set to auto but name checks"
190 " have been skipped",
191 errors.ECODE_INVAL)
192 nic_ip = default_ip
193 else:
194 # We defer pool operations until later, so that the iallocator has
195 # filled in the instance's node(s)
196 if ip.lower() == constants.NIC_IP_POOL:
197 if net is None:
198 raise errors.OpPrereqError("if ip=pool, parameter network"
199 " must be passed too",
200 errors.ECODE_INVAL)
201
202 elif not netutils.IPAddress.IsValid(ip):
203 raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
204 errors.ECODE_INVAL)
205
206 nic_ip = ip
207
208 # TODO: check the ip address for uniqueness
209 if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
210 raise errors.OpPrereqError("Routed nic mode requires an ip address",
211 errors.ECODE_INVAL)
212
213 # MAC address verification
214 mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
215 if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
216 mac = utils.NormalizeAndValidateMac(mac)
217
218 try:
219 # TODO: We need to factor this out
220 cfg.ReserveMAC(mac, ec_id)
221 except errors.ReservationError:
222 raise errors.OpPrereqError("MAC address %s already in use"
223 " in cluster" % mac,
224 errors.ECODE_NOTUNIQUE)
225
226 # Build nic parameters
227 nicparams = {}
228 if nic_mode_req:
229 nicparams[constants.NIC_MODE] = nic_mode
230 if link:
231 nicparams[constants.NIC_LINK] = link
232
233 check_params = cluster.SimpleFillNIC(nicparams)
234 objects.NIC.CheckParameterSyntax(check_params)
235 net_uuid = cfg.LookupNetwork(net)
236 name = nic.get(constants.INIC_NAME, None)
237 if name is not None and name.lower() == constants.VALUE_NONE:
238 name = None
239 nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
240 network=net_uuid, nicparams=nicparams)
241 nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
242 nics.append(nic_obj)
243
244 return nics
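# Illustrative sketch (hypothetical opcode fragment): a NIC requesting an IP
# from a pool must also name the network; the actual pool allocation is
# deferred until CheckPrereq, once the primary node is known:
#   op.nics = [{constants.INIC_IP: constants.NIC_IP_POOL,
#               constants.INIC_NETWORK: "net1"}]
#   nics = _ComputeNics(op, cluster, default_ip, cfg, ec_id)
#   # nics[0].ip is still "pool" here and is resolved later from the network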
245
246
247 def _CheckForConflictingIp(lu, ip, node):
248 """In case of conflicting IP address raise error.
249
250 @type ip: string
251 @param ip: IP address
252 @type node: string
253 @param node: node name
254
255 """
256 (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node)
257 if conf_net is not None:
258 raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
259 " network %s, but the target NIC does not." %
260 (ip, conf_net)),
261 errors.ECODE_STATE)
262
263 return (None, None)
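# Illustrative note (addition): the configuration is asked whether the IP
# falls into a network connected to the node's group; if it does, requesting
# it on a NIC that is not attached to that network is refused (ECODE_STATE).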
264
265
266 def _ComputeIPolicyInstanceSpecViolation(
267 ipolicy, instance_spec, disk_template,
268 _compute_fn=ComputeIPolicySpecViolation):
269 """Compute if instance specs meets the specs of ipolicy.
270
271 @type ipolicy: dict
272 @param ipolicy: The ipolicy to verify against
273 @type instance_spec: dict
274 @param instance_spec: The instance spec to verify
275 @type disk_template: string
276 @param disk_template: the disk template of the instance
277 @param _compute_fn: The function to verify ipolicy (unittest only)
278 @see: L{ComputeIPolicySpecViolation}
279
280 """
281 mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
282 cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
283 disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
284 disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
285 nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
286 spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)
287
288 return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
289 disk_sizes, spindle_use, disk_template)
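# Illustrative sketch (hypothetical spec): the spec dict is unpacked into the
# individual arguments expected by ComputeIPolicySpecViolation:
#   spec = {constants.ISPEC_MEM_SIZE: 512,
#           constants.ISPEC_DISK_COUNT: 1,
#           constants.ISPEC_DISK_SIZE: [1024]}
#   res = _ComputeIPolicyInstanceSpecViolation(ipolicy, spec,
#                                              constants.DT_PLAIN)
#   # res is a (possibly empty) list of human-readable policy violations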
290
291
292 def _CheckOSVariant(os_obj, name):
293 """Check whether an OS name conforms to the os variants specification.
294
295 @type os_obj: L{objects.OS}
296 @param os_obj: OS object to check
297 @type name: string
298 @param name: OS name passed by the user, to check for validity
299
300 """
301 variant = objects.OS.GetVariant(name)
302 if not os_obj.supported_variants:
303 if variant:
304 raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
305 " passed)" % (os_obj.name, variant),
306 errors.ECODE_INVAL)
307 return
308 if not variant:
309 raise errors.OpPrereqError("OS name must include a variant",
310 errors.ECODE_INVAL)
311
312 if variant not in os_obj.supported_variants:
313 raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
314
315
316 class LUInstanceCreate(LogicalUnit):
317 """Create an instance.
318
319 """
320 HPATH = "instance-add"
321 HTYPE = constants.HTYPE_INSTANCE
322 REQ_BGL = False
323
324 def CheckArguments(self):
325 """Check arguments.
326
327 """
328 # do not require name_check to ease forward/backward compatibility
329 # for tools
330 if self.op.no_install and self.op.start:
331 self.LogInfo("No-installation mode selected, disabling startup")
332 self.op.start = False
333 # validate/normalize the instance name
334 self.op.instance_name = \
335 netutils.Hostname.GetNormalizedName(self.op.instance_name)
336
337 if self.op.ip_check and not self.op.name_check:
338 # TODO: make the ip check more flexible and not depend on the name check
339 raise errors.OpPrereqError("Cannot do IP address check without a name"
340 " check", errors.ECODE_INVAL)
341
342 # check nics' parameter names
343 for nic in self.op.nics:
344 utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
345 # check that NIC device names are unique and valid
346 utils.ValidateDeviceNames("NIC", self.op.nics)
347
348 # check that disk's names are unique and valid
349 utils.ValidateDeviceNames("disk", self.op.disks)
350
351 cluster = self.cfg.GetClusterInfo()
352 if not self.op.disk_template in cluster.enabled_disk_templates:
353 raise errors.OpPrereqError("Cannot create an instance with disk template"
354 " '%s', because it is not enabled in the"
355 " cluster. Enabled disk templates are: %s." %
356 (self.op.disk_template,
357 ",".join(cluster.enabled_disk_templates)))
358
359 # check disks: parameter names and consistent adopt/no-adopt strategy
360 has_adopt = has_no_adopt = False
361 for disk in self.op.disks:
362 if self.op.disk_template != constants.DT_EXT:
363 utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
364 if constants.IDISK_ADOPT in disk:
365 has_adopt = True
366 else:
367 has_no_adopt = True
368 if has_adopt and has_no_adopt:
369 raise errors.OpPrereqError("Either all disks are adopted or none is",
370 errors.ECODE_INVAL)
371 if has_adopt:
372 if self.op.disk_template not in constants.DTS_MAY_ADOPT:
373 raise errors.OpPrereqError("Disk adoption is not supported for the"
374 " '%s' disk template" %
375 self.op.disk_template,
376 errors.ECODE_INVAL)
377 if self.op.iallocator is not None:
378 raise errors.OpPrereqError("Disk adoption not allowed with an"
379 " iallocator script", errors.ECODE_INVAL)
380 if self.op.mode == constants.INSTANCE_IMPORT:
381 raise errors.OpPrereqError("Disk adoption not allowed for"
382 " instance import", errors.ECODE_INVAL)
383 else:
384 if self.op.disk_template in constants.DTS_MUST_ADOPT:
385 raise errors.OpPrereqError("Disk template %s requires disk adoption,"
386 " but no 'adopt' parameter given" %
387 self.op.disk_template,
388 errors.ECODE_INVAL)
389
390 self.adopt_disks = has_adopt
391
392 # instance name verification
393 if self.op.name_check:
394 self.hostname1 = _CheckHostnameSane(self, self.op.instance_name)
395 self.op.instance_name = self.hostname1.name
396 # used in CheckPrereq for ip ping check
397 self.check_ip = self.hostname1.ip
398 else:
399 self.check_ip = None
400
401 # file storage checks
402 if (self.op.file_driver and
403 not self.op.file_driver in constants.FILE_DRIVER):
404 raise errors.OpPrereqError("Invalid file driver name '%s'" %
405 self.op.file_driver, errors.ECODE_INVAL)
406
407 # set default file_driver if unset and required
408 if (not self.op.file_driver and
409 self.op.disk_template in [constants.DT_FILE,
410 constants.DT_SHARED_FILE]):
411 self.op.file_driver = constants.FD_LOOP
412
413 if self.op.disk_template == constants.DT_FILE:
414 opcodes.RequireFileStorage()
415 elif self.op.disk_template == constants.DT_SHARED_FILE:
416 opcodes.RequireSharedFileStorage()
417
418 ### Node/iallocator related checks
419 CheckIAllocatorOrNode(self, "iallocator", "pnode")
420
421 if self.op.pnode is not None:
422 if self.op.disk_template in constants.DTS_INT_MIRROR:
423 if self.op.snode is None:
424 raise errors.OpPrereqError("The networked disk templates need"
425 " a mirror node", errors.ECODE_INVAL)
426 elif self.op.snode:
427 self.LogWarning("Secondary node will be ignored on non-mirrored disk"
428 " template")
429 self.op.snode = None
430
431 _CheckOpportunisticLocking(self.op)
432
433 self._cds = GetClusterDomainSecret()
434
435 if self.op.mode == constants.INSTANCE_IMPORT:
436 # On import force_variant must be True, because if we forced it at
437 # initial install, our only chance when importing it back is that it
438 # works again!
439 self.op.force_variant = True
440
441 if self.op.no_install:
442 self.LogInfo("No-installation mode has no effect during import")
443
444 elif self.op.mode == constants.INSTANCE_CREATE:
445 if self.op.os_type is None:
446 raise errors.OpPrereqError("No guest OS specified",
447 errors.ECODE_INVAL)
448 if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
449 raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
450 " installation" % self.op.os_type,
451 errors.ECODE_STATE)
452 if self.op.disk_template is None:
453 raise errors.OpPrereqError("No disk template specified",
454 errors.ECODE_INVAL)
455
456 elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
457 # Check handshake to ensure both clusters have the same domain secret
458 src_handshake = self.op.source_handshake
459 if not src_handshake:
460 raise errors.OpPrereqError("Missing source handshake",
461 errors.ECODE_INVAL)
462
463 errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
464 src_handshake)
465 if errmsg:
466 raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
467 errors.ECODE_INVAL)
468
469 # Load and check source CA
470 self.source_x509_ca_pem = self.op.source_x509_ca
471 if not self.source_x509_ca_pem:
472 raise errors.OpPrereqError("Missing source X509 CA",
473 errors.ECODE_INVAL)
474
475 try:
476 (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
477 self._cds)
478 except OpenSSL.crypto.Error, err:
479 raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
480 (err, ), errors.ECODE_INVAL)
481
482 (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
483 if errcode is not None:
484 raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
485 errors.ECODE_INVAL)
486
487 self.source_x509_ca = cert
488
489 src_instance_name = self.op.source_instance_name
490 if not src_instance_name:
491 raise errors.OpPrereqError("Missing source instance name",
492 errors.ECODE_INVAL)
493
494 self.source_instance_name = \
495 netutils.GetHostname(name=src_instance_name).name
496
497 else:
498 raise errors.OpPrereqError("Invalid instance creation mode %r" %
499 self.op.mode, errors.ECODE_INVAL)
500
501 def ExpandNames(self):
502 """ExpandNames for CreateInstance.
503
504 Figure out the right locks for instance creation.
505
506 """
507 self.needed_locks = {}
508
509 instance_name = self.op.instance_name
510 # this is just a preventive check, but someone might still add this
511 # instance in the meantime, and creation will fail at lock-add time
512 if instance_name in self.cfg.GetInstanceList():
513 raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
514 instance_name, errors.ECODE_EXISTS)
515
516 self.add_locks[locking.LEVEL_INSTANCE] = instance_name
517
518 if self.op.iallocator:
519 # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
520 # specifying a group on instance creation and then selecting nodes from
521 # that group
522 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
523 self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
524
525 if self.op.opportunistic_locking:
526 self.opportunistic_locks[locking.LEVEL_NODE] = True
527 else:
528 self.op.pnode = ExpandNodeName(self.cfg, self.op.pnode)
529 nodelist = [self.op.pnode]
530 if self.op.snode is not None:
531 self.op.snode = ExpandNodeName(self.cfg, self.op.snode)
532 nodelist.append(self.op.snode)
533 self.needed_locks[locking.LEVEL_NODE] = nodelist
534
535 # in case of import lock the source node too
536 if self.op.mode == constants.INSTANCE_IMPORT:
537 src_node = self.op.src_node
538 src_path = self.op.src_path
539
540 if src_path is None:
541 self.op.src_path = src_path = self.op.instance_name
542
543 if src_node is None:
544 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
545 self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
546 self.op.src_node = None
547 if os.path.isabs(src_path):
548 raise errors.OpPrereqError("Importing an instance from a path"
549 " requires a source node option",
550 errors.ECODE_INVAL)
551 else:
552 self.op.src_node = src_node = ExpandNodeName(self.cfg, src_node)
553 if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
554 self.needed_locks[locking.LEVEL_NODE].append(src_node)
555 if not os.path.isabs(src_path):
556 self.op.src_path = src_path = \
557 utils.PathJoin(pathutils.EXPORT_DIR, src_path)
558
559 self.needed_locks[locking.LEVEL_NODE_RES] = \
560 CopyLockList(self.needed_locks[locking.LEVEL_NODE])
561
562 def DeclareLocks(self, level):
563 if level == locking.LEVEL_NODE_RES and \
564 self.opportunistic_locks[locking.LEVEL_NODE]:
565 # Even when using opportunistic locking, we require the same set of
566 # NODE_RES locks as we got NODE locks
567 self.needed_locks[locking.LEVEL_NODE_RES] = \
568 self.owned_locks(locking.LEVEL_NODE)
569
570 def _RunAllocator(self):
571 """Run the allocator based on input opcode.
572
573 """
574 if self.op.opportunistic_locking:
575 # Only consider nodes for which a lock is held
576 node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
577 else:
578 node_whitelist = None
579
580 #TODO Export network to iallocator so that it chooses a pnode
581 # in a nodegroup that has the desired network connected to
582 req = _CreateInstanceAllocRequest(self.op, self.disks,
583 self.nics, self.be_full,
584 node_whitelist)
585 ial = iallocator.IAllocator(self.cfg, self.rpc, req)
586
587 ial.Run(self.op.iallocator)
588
589 if not ial.success:
590 # When opportunistic locks are used, only a temporary failure is generated
591 if self.op.opportunistic_locking:
592 ecode = errors.ECODE_TEMP_NORES
593 else:
594 ecode = errors.ECODE_NORES
595
596 raise errors.OpPrereqError("Can't compute nodes using"
597 " iallocator '%s': %s" %
598 (self.op.iallocator, ial.info),
599 ecode)
600
601 self.op.pnode = ial.result[0]
602 self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
603 self.op.instance_name, self.op.iallocator,
604 utils.CommaJoin(ial.result))
605
606 assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"
607
608 if req.RequiredNodes() == 2:
609 self.op.snode = ial.result[1]
610
611 def BuildHooksEnv(self):
612 """Build hooks env.
613
614 This runs on master, primary and secondary nodes of the instance.
615
616 """
617 env = {
618 "ADD_MODE": self.op.mode,
619 }
620 if self.op.mode == constants.INSTANCE_IMPORT:
621 env["SRC_NODE"] = self.op.src_node
622 env["SRC_PATH"] = self.op.src_path
623 env["SRC_IMAGES"] = self.src_images
624
625 env.update(BuildInstanceHookEnv(
626 name=self.op.instance_name,
627 primary_node=self.op.pnode,
628 secondary_nodes=self.secondaries,
629 status=self.op.start,
630 os_type=self.op.os_type,
631 minmem=self.be_full[constants.BE_MINMEM],
632 maxmem=self.be_full[constants.BE_MAXMEM],
633 vcpus=self.be_full[constants.BE_VCPUS],
634 nics=NICListToTuple(self, self.nics),
635 disk_template=self.op.disk_template,
636 disks=[(d[constants.IDISK_NAME], d.get("uuid", ""),
637 d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
638 for d in self.disks],
639 bep=self.be_full,
640 hvp=self.hv_full,
641 hypervisor_name=self.op.hypervisor,
642 tags=self.op.tags,
643 ))
644
645 return env
646
647 def BuildHooksNodes(self):
648 """Build hooks nodes.
649
650 """
651 nl = [self.cfg.GetMasterNode(), self.op.pnode] + self.secondaries
652 return nl, nl
653
654 def _ReadExportInfo(self):
655 """Reads the export information from disk.
656
657 It will override the opcode source node and path with the actual
658 information, if these two were not specified before.
659
660 @return: the export information
661
662 """
663 assert self.op.mode == constants.INSTANCE_IMPORT
664
665 src_node = self.op.src_node
666 src_path = self.op.src_path
667
668 if src_node is None:
669 locked_nodes = self.owned_locks(locking.LEVEL_NODE)
670 exp_list = self.rpc.call_export_list(locked_nodes)
671 found = False
672 for node in exp_list:
673 if exp_list[node].fail_msg:
674 continue
675 if src_path in exp_list[node].payload:
676 found = True
677 self.op.src_node = src_node = node
678 self.op.src_path = src_path = utils.PathJoin(pathutils.EXPORT_DIR,
679 src_path)
680 break
681 if not found:
682 raise errors.OpPrereqError("No export found for relative path %s" %
683 src_path, errors.ECODE_INVAL)
684
685 CheckNodeOnline(self, src_node)
686 result = self.rpc.call_export_info(src_node, src_path)
687 result.Raise("No export or invalid export found in dir %s" % src_path)
688
689 export_info = objects.SerializableConfigParser.Loads(str(result.payload))
690 if not export_info.has_section(constants.INISECT_EXP):
691 raise errors.ProgrammerError("Corrupted export config",
692 errors.ECODE_ENVIRON)
693
694 ei_version = export_info.get(constants.INISECT_EXP, "version")
695 if (int(ei_version) != constants.EXPORT_VERSION):
696 raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
697 (ei_version, constants.EXPORT_VERSION),
698 errors.ECODE_ENVIRON)
699 return export_info
700
701 def _ReadExportParams(self, einfo):
702 """Use export parameters as defaults.
703
704 In case the opcode doesn't specify (i.e. override) some instance
705 parameters, try to use them from the export information, if
706 it declares them.
707
708 """
709 self.op.os_type = einfo.get(constants.INISECT_EXP, "os")
710
711 if self.op.disk_template is None:
712 if einfo.has_option(constants.INISECT_INS, "disk_template"):
713 self.op.disk_template = einfo.get(constants.INISECT_INS,
714 "disk_template")
715 if self.op.disk_template not in constants.DISK_TEMPLATES:
716 raise errors.OpPrereqError("Disk template specified in configuration"
717 " file is not one of the allowed values:"
718 " %s" %
719 " ".join(constants.DISK_TEMPLATES),
720 errors.ECODE_INVAL)
721 else:
722 raise errors.OpPrereqError("No disk template specified and the export"
723 " is missing the disk_template information",
724 errors.ECODE_INVAL)
725
726 if not self.op.disks:
727 disks = []
728 # TODO: import the disk iv_name too
729 for idx in range(constants.MAX_DISKS):
730 if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
731 disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
732 disks.append({constants.IDISK_SIZE: disk_sz})
733 self.op.disks = disks
734 if not disks and self.op.disk_template != constants.DT_DISKLESS:
735 raise errors.OpPrereqError("No disk info specified and the export"
736 " is missing the disk information",
737 errors.ECODE_INVAL)
738
739 if not self.op.nics:
740 nics = []
741 for idx in range(constants.MAX_NICS):
742 if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
743 ndict = {}
744 for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
745 v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
746 ndict[name] = v
747 nics.append(ndict)
748 else:
749 break
750 self.op.nics = nics
751
752 if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
753 self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()
754
755 if (self.op.hypervisor is None and
756 einfo.has_option(constants.INISECT_INS, "hypervisor")):
757 self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")
758
759 if einfo.has_section(constants.INISECT_HYP):
760 # use the export parameters but do not override the ones
761 # specified by the user
762 for name, value in einfo.items(constants.INISECT_HYP):
763 if name not in self.op.hvparams:
764 self.op.hvparams[name] = value
765
766 if einfo.has_section(constants.INISECT_BEP):
767 # use the parameters, without overriding
768 for name, value in einfo.items(constants.INISECT_BEP):
769 if name not in self.op.beparams:
770 self.op.beparams[name] = value
771 # Compatibility for the old "memory" be param
772 if name == constants.BE_MEMORY:
773 if constants.BE_MAXMEM not in self.op.beparams:
774 self.op.beparams[constants.BE_MAXMEM] = value
775 if constants.BE_MINMEM not in self.op.beparams:
776 self.op.beparams[constants.BE_MINMEM] = value
777 else:
778 # try to read the parameters old style, from the main section
779 for name in constants.BES_PARAMETERS:
780 if (name not in self.op.beparams and
781 einfo.has_option(constants.INISECT_INS, name)):
782 self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)
783
784 if einfo.has_section(constants.INISECT_OSP):
785 # use the parameters, without overriding
786 for name, value in einfo.items(constants.INISECT_OSP):
787 if name not in self.op.osparams:
788 self.op.osparams[name] = value
789
790 def _RevertToDefaults(self, cluster):
791 """Revert the instance parameters to the default values.
792
793 """
794 # hvparams
795 hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
796 for name in self.op.hvparams.keys():
797 if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
798 del self.op.hvparams[name]
799 # beparams
800 be_defs = cluster.SimpleFillBE({})
801 for name in self.op.beparams.keys():
802 if name in be_defs and be_defs[name] == self.op.beparams[name]:
803 del self.op.beparams[name]
804 # nic params
805 nic_defs = cluster.SimpleFillNIC({})
806 for nic in self.op.nics:
807 for name in constants.NICS_PARAMETERS:
808 if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
809 del nic[name]
810 # osparams
811 os_defs = cluster.SimpleFillOS(self.op.os_type, {})
812 for name in self.op.osparams.keys():
813 if name in os_defs and os_defs[name] == self.op.osparams[name]:
814 del self.op.osparams[name]
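# Illustrative note (addition, describing the intent): with identify_defaults
# any opcode parameter whose value equals the current cluster default is
# dropped again, so the new instance keeps tracking the cluster-level value
# instead of freezing a private copy of it.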
815
816 def _CalculateFileStorageDir(self):
817 """Calculate final instance file storage dir.
818
819 """
820 # file storage dir calculation/check
821 self.instance_file_storage_dir = None
822 if self.op.disk_template in constants.DTS_FILEBASED:
823 # build the full file storage dir path
824 joinargs = []
825
826 if self.op.disk_template == constants.DT_SHARED_FILE:
827 get_fsd_fn = self.cfg.GetSharedFileStorageDir
828 else:
829 get_fsd_fn = self.cfg.GetFileStorageDir
830
831 cfg_storagedir = get_fsd_fn()
832 if not cfg_storagedir:
833 raise errors.OpPrereqError("Cluster file storage dir not defined",
834 errors.ECODE_STATE)
835 joinargs.append(cfg_storagedir)
836
837 if self.op.file_storage_dir is not None:
838 joinargs.append(self.op.file_storage_dir)
839
840 joinargs.append(self.op.instance_name)
841
842 # pylint: disable=W0142
843 self.instance_file_storage_dir = utils.PathJoin(*joinargs)
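# Illustrative sketch (hypothetical paths): the final directory is the join of
# the cluster storage dir, the optional per-instance dir from the opcode and
# the instance name, e.g.
# "/srv/ganeti/file-storage" + "web" + "inst1.example.com"
#   -> "/srv/ganeti/file-storage/web/inst1.example.com"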
844
845 def CheckPrereq(self): # pylint: disable=R0914
846 """Check prerequisites.
847
848 """
849 self._CalculateFileStorageDir()
850
851 if self.op.mode == constants.INSTANCE_IMPORT:
852 export_info = self._ReadExportInfo()
853 self._ReadExportParams(export_info)
854 self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
855 else:
856 self._old_instance_name = None
857
858 if (not self.cfg.GetVGName() and
859 self.op.disk_template not in constants.DTS_NOT_LVM):
860 raise errors.OpPrereqError("Cluster does not support lvm-based"
861 " instances", errors.ECODE_STATE)
862
863 if (self.op.hypervisor is None or
864 self.op.hypervisor == constants.VALUE_AUTO):
865 self.op.hypervisor = self.cfg.GetHypervisorType()
866
867 cluster = self.cfg.GetClusterInfo()
868 enabled_hvs = cluster.enabled_hypervisors
869 if self.op.hypervisor not in enabled_hvs:
870 raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
871 " cluster (%s)" %
872 (self.op.hypervisor, ",".join(enabled_hvs)),
873 errors.ECODE_STATE)
874
875 # Check tag validity
876 for tag in self.op.tags:
877 objects.TaggableObject.ValidateTag(tag)
878
879 # check hypervisor parameter syntax (locally)
880 utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
881 filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
882 self.op.hvparams)
883 hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
884 hv_type.CheckParameterSyntax(filled_hvp)
885 self.hv_full = filled_hvp
886 # check that we don't specify global parameters on an instance
887 CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
888 "instance", "cluster")
889
890 # fill and remember the beparams dict
891 self.be_full = _ComputeFullBeParams(self.op, cluster)
892
893 # build os parameters
894 self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)
895
896 # now that hvp/bep are in final format, let's reset to defaults,
897 # if told to do so
898 if self.op.identify_defaults:
899 self._RevertToDefaults(cluster)
900
901 # NIC buildup
902 self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
903 self.proc.GetECId())
904
905 # disk checks/pre-build
906 default_vg = self.cfg.GetVGName()
907 self.disks = ComputeDisks(self.op, default_vg)
908
909 if self.op.mode == constants.INSTANCE_IMPORT:
910 disk_images = []
911 for idx in range(len(self.disks)):
912 option = "disk%d_dump" % idx
913 if export_info.has_option(constants.INISECT_INS, option):
914 # FIXME: are the old OSes, disk sizes, etc. useful?
915 export_name = export_info.get(constants.INISECT_INS, option)
916 image = utils.PathJoin(self.op.src_path, export_name)
917 disk_images.append(image)
918 else:
919 disk_images.append(False)
920
921 self.src_images = disk_images
922
923 if self.op.instance_name == self._old_instance_name:
924 for idx, nic in enumerate(self.nics):
925 if nic.mac == constants.VALUE_AUTO:
926 nic_mac_ini = "nic%d_mac" % idx
927 nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
928
929 # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
930
931 # ip ping checks (we use the same ip that was resolved in CheckArguments)
932 if self.op.ip_check:
933 if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
934 raise errors.OpPrereqError("IP %s of instance %s already in use" %
935 (self.check_ip, self.op.instance_name),
936 errors.ECODE_NOTUNIQUE)
937
938 #### mac address generation
939 # By generating the MAC address here, both the allocator and the hooks get
940 # the real final MAC address rather than the 'auto' or 'generate' value.
941 # There is a race condition between the generation and the instance object
942 # creation, which means that we know the mac is valid now, but we're not
943 # sure it will be when we actually add the instance. If things go bad
944 # adding the instance will abort because of a duplicate mac, and the
945 # creation job will fail.
946 for nic in self.nics:
947 if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
948 nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())
949
950 #### allocator run
951
952 if self.op.iallocator is not None:
953 self._RunAllocator()
954
955 # Release all unneeded node locks
956 keep_locks = filter(None, [self.op.pnode, self.op.snode, self.op.src_node])
957 ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
958 ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
959 ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
960
961 assert (self.owned_locks(locking.LEVEL_NODE) ==
962 self.owned_locks(locking.LEVEL_NODE_RES)), \
963 "Node locks differ from node resource locks"
964
965 #### node related checks
966
967 # check primary node
968 self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
969 assert self.pnode is not None, \
970 "Cannot retrieve locked node %s" % self.op.pnode
971 if pnode.offline:
972 raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
973 pnode.name, errors.ECODE_STATE)
974 if pnode.drained:
975 raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
976 pnode.name, errors.ECODE_STATE)
977 if not pnode.vm_capable:
978 raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
979 " '%s'" % pnode.name, errors.ECODE_STATE)
980
981 self.secondaries = []
982
983 # Fill in any IPs from IP pools. This must happen here, because we need to
984 # know the nic's primary node, as specified by the iallocator
985 for idx, nic in enumerate(self.nics):
986 net_uuid = nic.network
987 if net_uuid is not None:
988 nobj = self.cfg.GetNetwork(net_uuid)
989 netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.name)
990 if netparams is None:
991 raise errors.OpPrereqError("No netparams found for network"
992 " %s. Propably not connected to"
993 " node's %s nodegroup" %
994 (nobj.name, self.pnode.name),
995 errors.ECODE_INVAL)
996 self.LogInfo("NIC/%d inherits netparams %s" %
997 (idx, netparams.values()))
998 nic.nicparams = dict(netparams)
999 if nic.ip is not None:
1000 if nic.ip.lower() == constants.NIC_IP_POOL:
1001 try:
1002 nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
1003 except errors.ReservationError:
1004 raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
1005 " from the address pool" % idx,
1006 errors.ECODE_STATE)
1007 self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
1008 else:
1009 try:
1010 self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId())
1011 except errors.ReservationError:
1012 raise errors.OpPrereqError("IP address %s already in use"
1013 " or does not belong to network %s" %
1014 (nic.ip, nobj.name),
1015 errors.ECODE_NOTUNIQUE)
1016
1017 # net is None, ip None or given
1018 elif self.op.conflicts_check:
1019 _CheckForConflictingIp(self, nic.ip, self.pnode.name)
1020
1021 # mirror node verification
1022 if self.op.disk_template in constants.DTS_INT_MIRROR:
1023 if self.op.snode == pnode.name:
1024 raise errors.OpPrereqError("The secondary node cannot be the"
1025 " primary node", errors.ECODE_INVAL)
1026 CheckNodeOnline(self, self.op.snode)
1027 CheckNodeNotDrained(self, self.op.snode)
1028 CheckNodeVmCapable(self, self.op.snode)
1029 self.secondaries.append(self.op.snode)
1030
1031 snode = self.cfg.GetNodeInfo(self.op.snode)
1032 if pnode.group != snode.group:
1033 self.LogWarning("The primary and secondary nodes are in two"
1034 " different node groups; the disk parameters"
1035 " from the first disk's node group will be"
1036 " used")
1037
1038 if not self.op.disk_template in constants.DTS_EXCL_STORAGE:
1039 nodes = [pnode]
1040 if self.op.disk_template in constants.DTS_INT_MIRROR:
1041 nodes.append(snode)
1042 has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
1043 if compat.any(map(has_es, nodes)):
1044 raise errors.OpPrereqError("Disk template %s not supported with"
1045 " exclusive storage" % self.op.disk_template,
1046 errors.ECODE_STATE)
1047
1048 nodenames = [pnode.name] + self.secondaries
1049
1050 if not self.adopt_disks:
1051 if self.op.disk_template == constants.DT_RBD:
1052 # CheckRADOSFreeSpace() is just a placeholder.
1053 # Any function that checks prerequisites can be placed here.
1054 # Check if there is enough space on the RADOS cluster.
1055 CheckRADOSFreeSpace()
1056 elif self.op.disk_template == constants.DT_EXT:
1057 # FIXME: Function that checks prereqs if needed
1058 pass
1059 else:
1060 # Check lv size requirements, if not adopting
1061 req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
1062 CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)
1063
1064 elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
1065 all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
1066 disk[constants.IDISK_ADOPT])
1067 for disk in self.disks])
1068 if len(all_lvs) != len(self.disks):
1069 raise errors.OpPrereqError("Duplicate volume names given for adoption",
1070 errors.ECODE_INVAL)
1071 for lv_name in all_lvs:
1072 try:
1073 # FIXME: lv_name here is "vg/lv" need to ensure that other calls
1074 # to ReserveLV uses the same syntax
1075 self.cfg.ReserveLV(lv_name, self.proc.GetECId())
1076 except errors.ReservationError:
1077 raise errors.OpPrereqError("LV named %s used by another instance" %
1078 lv_name, errors.ECODE_NOTUNIQUE)
1079
1080 vg_names = self.rpc.call_vg_list([pnode.name])[pnode.name]
1081 vg_names.Raise("Cannot get VG information from node %s" % pnode.name)
1082
1083 node_lvs = self.rpc.call_lv_list([pnode.name],
1084 vg_names.payload.keys())[pnode.name]
1085 node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
1086 node_lvs = node_lvs.payload
1087
1088 delta = all_lvs.difference(node_lvs.keys())
1089 if delta:
1090 raise errors.OpPrereqError("Missing logical volume(s): %s" %
1091 utils.CommaJoin(delta),
1092 errors.ECODE_INVAL)
1093 online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
1094 if online_lvs:
1095 raise errors.OpPrereqError("Online logical volumes found, cannot"
1096 " adopt: %s" % utils.CommaJoin(online_lvs),
1097 errors.ECODE_STATE)
1098 # update the disk sizes based on what is found
1099 for dsk in self.disks:
1100 dsk[constants.IDISK_SIZE] = \
1101 int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
1102 dsk[constants.IDISK_ADOPT])][0]))
1103
1104 elif self.op.disk_template == constants.DT_BLOCK:
1105 # Normalize and de-duplicate device paths
1106 all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
1107 for disk in self.disks])
1108 if len(all_disks) != len(self.disks):
1109 raise errors.OpPrereqError("Duplicate disk names given for adoption",
1110 errors.ECODE_INVAL)
1111 baddisks = [d for d in all_disks
1112 if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
1113 if baddisks:
1114 raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
1115 " cannot be adopted" %
1116 (utils.CommaJoin(baddisks),
1117 constants.ADOPTABLE_BLOCKDEV_ROOT),
1118 errors.ECODE_INVAL)
1119
1120 node_disks = self.rpc.call_bdev_sizes([pnode.name],
1121 list(all_disks))[pnode.name]
1122 node_disks.Raise("Cannot get block device information from node %s" %
1123 pnode.name)
1124 node_disks = node_disks.payload
1125 delta = all_disks.difference(node_disks.keys())
1126 if delta:
1127 raise errors.OpPrereqError("Missing block device(s): %s" %
1128 utils.CommaJoin(delta),
1129 errors.ECODE_INVAL)
1130 for dsk in self.disks:
1131 dsk[constants.IDISK_SIZE] = \
1132 int(float(node_disks[dsk[constants.IDISK_ADOPT]]))
1133
1134 # Verify instance specs
1135 spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
1136 ispec = {
1137 constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
1138 constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
1139 constants.ISPEC_DISK_COUNT: len(self.disks),
1140 constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
1141 for disk in self.disks],
1142 constants.ISPEC_NIC_COUNT: len(self.nics),
1143 constants.ISPEC_SPINDLE_USE: spindle_use,
1144 }
1145
1146 group_info = self.cfg.GetNodeGroup(pnode.group)
1147 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1148 res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
1149 self.op.disk_template)
1150 if not self.op.ignore_ipolicy and res:
1151 msg = ("Instance allocation to group %s (%s) violates policy: %s" %
1152 (pnode.group, group_info.name, utils.CommaJoin(res)))
1153 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
1154
1155 CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
1156
1157 CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
1158 # check OS parameters (remotely)
1159 CheckOSParams(self, True, nodenames, self.op.os_type, self.os_full)
1160
1161 CheckNicsBridgesExist(self, self.nics, self.pnode.name)
1162
1163 #TODO: _CheckExtParams (remotely)
1164 # Check parameters for extstorage
1165
1166 # memory check on primary node
1167 #TODO(dynmem): use MINMEM for checking
1168 if self.op.start:
1169 CheckNodeFreeMemory(self, self.pnode.name,
1170 "creating instance %s" % self.op.instance_name,
1171 self.be_full[constants.BE_MAXMEM],
1172 self.op.hypervisor)
1173
1174 self.dry_run_result = list(nodenames)
1175
1176 def Exec(self, feedback_fn):
1177 """Create and add the instance to the cluster.
1178
1179 """
1180 instance = self.op.instance_name
1181 pnode_name = self.pnode.name
1182
1183 assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
1184 self.owned_locks(locking.LEVEL_NODE)), \
1185 "Node locks differ from node resource locks"
1186 assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)
1187
1188 ht_kind = self.op.hypervisor
1189 if ht_kind in constants.HTS_REQ_PORT:
1190 network_port = self.cfg.AllocatePort()
1191 else:
1192 network_port = None
1193
1194 # This is ugly, but we have a chicken-and-egg problem here
1195 # We can only take the group disk parameters, as the instance
1196 # has no disks yet (we are generating them right here).
1197 node = self.cfg.GetNodeInfo(pnode_name)
1198 nodegroup = self.cfg.GetNodeGroup(node.group)
1199 disks = GenerateDiskTemplate(self,
1200 self.op.disk_template,
1201 instance, pnode_name,
1202 self.secondaries,
1203 self.disks,
1204 self.instance_file_storage_dir,
1205 self.op.file_driver,
1206 0,
1207 feedback_fn,
1208 self.cfg.GetGroupDiskParams(nodegroup))
1209
1210 iobj = objects.Instance(name=instance, os=self.op.os_type,
1211 primary_node=pnode_name,
1212 nics=self.nics, disks=disks,
1213 disk_template=self.op.disk_template,
1214 disks_active=False,
1215 admin_state=constants.ADMINST_DOWN,
1216 network_port=network_port,
1217 beparams=self.op.beparams,
1218 hvparams=self.op.hvparams,
1219 hypervisor=self.op.hypervisor,
1220 osparams=self.op.osparams,
1221 )
1222
1223 if self.op.tags:
1224 for tag in self.op.tags:
1225 iobj.AddTag(tag)
1226
1227 if self.adopt_disks:
1228 if self.op.disk_template == constants.DT_PLAIN:
1229 # rename LVs to the newly-generated names; we need to construct
1230 # 'fake' LV disks with the old data, plus the new unique_id
1231 tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
1232 rename_to = []
1233 for t_dsk, a_dsk in zip(tmp_disks, self.disks):
1234 rename_to.append(t_dsk.logical_id)
1235 t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
1236 self.cfg.SetDiskID(t_dsk, pnode_name)
1237 result = self.rpc.call_blockdev_rename(pnode_name,
1238 zip(tmp_disks, rename_to))
1239 result.Raise("Failed to rename adoped LVs")
1240 else:
1241 feedback_fn("* creating instance disks...")
1242 try:
1243 CreateDisks(self, iobj)
1244 except errors.OpExecError:
1245 self.LogWarning("Device creation failed")
1246 self.cfg.ReleaseDRBDMinors(instance)
1247 raise
1248
1249 feedback_fn("adding instance %s to cluster config" % instance)
1250
1251 self.cfg.AddInstance(iobj, self.proc.GetECId())
1252
1253 # Declare that we don't want to remove the instance lock anymore, as we've
1254 # added the instance to the config
1255 del self.remove_locks[locking.LEVEL_INSTANCE]
1256
1257 if self.op.mode == constants.INSTANCE_IMPORT:
1258 # Release unused nodes
1259 ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node])
1260 else:
1261 # Release all nodes
1262 ReleaseLocks(self, locking.LEVEL_NODE)
1263
1264 disk_abort = False
1265 if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
1266 feedback_fn("* wiping instance disks...")
1267 try:
1268 WipeDisks(self, iobj)
1269 except errors.OpExecError, err:
1270 logging.exception("Wiping disks failed")
1271 self.LogWarning("Wiping instance disks failed (%s)", err)
1272 disk_abort = True
1273
1274 if disk_abort:
1275 # Something is already wrong with the disks, don't do anything else
1276 pass
1277 elif self.op.wait_for_sync:
1278 disk_abort = not WaitForSync(self, iobj)
1279 elif iobj.disk_template in constants.DTS_INT_MIRROR:
1280 # make sure the disks are not degraded (still sync-ing is ok)
1281 feedback_fn("* checking mirrors status")
1282 disk_abort = not WaitForSync(self, iobj, oneshot=True)
1283 else:
1284 disk_abort = False
1285
1286 if disk_abort:
1287 RemoveDisks(self, iobj)
1288 self.cfg.RemoveInstance(iobj.name)
1289 # Make sure the instance lock gets removed
1290 self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
1291 raise errors.OpExecError("There are some degraded disks for"
1292 " this instance")
1293
1294 # instance disks are now active
1295 iobj.disks_active = True
1296
1297 # Release all node resource locks
1298 ReleaseLocks(self, locking.LEVEL_NODE_RES)
1299
1300 if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
1301 # we need to set the disk IDs to the primary node, since the
1302 # preceding code might or might not have done it, depending on
1303 # disk template and other options
1304 for disk in iobj.disks:
1305 self.cfg.SetDiskID(disk, pnode_name)
1306 if self.op.mode == constants.INSTANCE_CREATE:
1307 if not self.op.no_install:
1308 pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
1309 not self.op.wait_for_sync)
1310 if pause_sync:
1311 feedback_fn("* pausing disk sync to install instance OS")
1312 result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
1313 (iobj.disks,
1314 iobj), True)
1315 for idx, success in enumerate(result.payload):
1316 if not success:
1317 logging.warn("pause-sync of instance %s for disk %d failed",
1318 instance, idx)
1319
1320 feedback_fn("* running the instance OS create scripts...")
1321 # FIXME: pass debug option from opcode to backend
1322 os_add_result = \
1323 self.rpc.call_instance_os_add(pnode_name, (iobj, None), False,
1324 self.op.debug_level)
1325 if pause_sync:
1326 feedback_fn("* resuming disk sync")
1327 result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
1328 (iobj.disks,
1329 iobj), False)
1330 for idx, success in enumerate(result.payload):
1331 if not success:
1332 logging.warn("resume-sync of instance %s for disk %d failed",
1333 instance, idx)
1334
1335 os_add_result.Raise("Could not add os for instance %s"
1336 " on node %s" % (instance, pnode_name))
1337
1338 else:
1339 if self.op.mode == constants.INSTANCE_IMPORT:
1340 feedback_fn("* running the instance OS import scripts...")
1341
1342 transfers = []
1343
1344 for idx, image in enumerate(self.src_images):
1345 if not image:
1346 continue
1347
1348 # FIXME: pass debug option from opcode to backend
1349 dt = masterd.instance.DiskTransfer("disk/%s" % idx,
1350 constants.IEIO_FILE, (image, ),
1351 constants.IEIO_SCRIPT,
1352 (iobj.disks[idx], idx),
1353 None)
1354 transfers.append(dt)
1355
1356 import_result = \
1357 masterd.instance.TransferInstanceData(self, feedback_fn,
1358 self.op.src_node, pnode_name,
1359 self.pnode.secondary_ip,
1360 iobj, transfers)
1361 if not compat.all(import_result):
1362 self.LogWarning("Some disks for instance %s on node %s were not"
1363 " imported successfully" % (instance, pnode_name))
1364
1365 rename_from = self._old_instance_name
1366
1367 elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
1368 feedback_fn("* preparing remote import...")
1369 # The source cluster will stop the instance before attempting to make
1370 # a connection. In some cases stopping an instance can take a long
1371 # time, hence the shutdown timeout is added to the connection
1372 # timeout.
1373 connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
1374 self.op.source_shutdown_timeout)
1375 timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
1376
1377 assert iobj.primary_node == self.pnode.name
1378 disk_results = \
1379 masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
1380 self.source_x509_ca,
1381 self._cds, timeouts)
1382 if not compat.all(disk_results):
1383 # TODO: Should the instance still be started, even if some disks
1384 # failed to import (valid for local imports, too)?
1385 self.LogWarning("Some disks for instance %s on node %s were not"
1386 " imported successfully" % (instance, pnode_name))
1387
1388 rename_from = self.source_instance_name
1389
1390 else:
1391 # also checked in the prereq part
1392 raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
1393 % self.op.mode)
1394
1395 # Run rename script on newly imported instance
1396 assert iobj.name == instance
1397 feedback_fn("Running rename script for %s" % instance)
1398 result = self.rpc.call_instance_run_rename(pnode_name, iobj,
1399 rename_from,
1400 self.op.debug_level)
1401 if result.fail_msg:
1402 self.LogWarning("Failed to run rename script for %s on node"
1403 " %s: %s" % (instance, pnode_name, result.fail_msg))
1404
1405 assert not self.owned_locks(locking.LEVEL_NODE_RES)
1406
1407 if self.op.start:
1408 iobj.admin_state = constants.ADMINST_UP
1409 self.cfg.Update(iobj, feedback_fn)
1410 logging.info("Starting instance %s on node %s", instance, pnode_name)
1411 feedback_fn("* starting instance...")
1412 result = self.rpc.call_instance_start(pnode_name, (iobj, None, None),
1413 False, self.op.reason)
1414 result.Raise("Could not start instance")
1415
1416 return list(iobj.all_nodes)
1417
1418
1419 class LUInstanceRename(LogicalUnit):
1420 """Rename an instance.
1421
1422 """
1423 HPATH = "instance-rename"
1424 HTYPE = constants.HTYPE_INSTANCE
1425
1426 def CheckArguments(self):
1427 """Check arguments.
1428
1429 """
1430 if self.op.ip_check and not self.op.name_check:
1431 # TODO: make the ip check more flexible and not depend on the name check
1432 raise errors.OpPrereqError("IP address check requires a name check",
1433 errors.ECODE_INVAL)
1434
1435 def BuildHooksEnv(self):
1436 """Build hooks env.
1437
1438 This runs on master, primary and secondary nodes of the instance.
1439
1440 """
1441 env = BuildInstanceHookEnvByObject(self, self.instance)
1442 env["INSTANCE_NEW_NAME"] = self.op.new_name
1443 return env
1444
1445 def BuildHooksNodes(self):
1446 """Build hooks nodes.
1447
1448 """
1449 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
1450 return (nl, nl)
1451
1452 def CheckPrereq(self):
1453 """Check prerequisites.
1454
1455 This checks that the instance is in the cluster and is not running.
1456
1457 """
1458 self.op.instance_name = ExpandInstanceName(self.cfg,
1459 self.op.instance_name)
1460 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
1461 assert instance is not None
1462 CheckNodeOnline(self, instance.primary_node)
1463 CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
1464 msg="cannot rename")
1465 self.instance = instance
1466
1467 new_name = self.op.new_name
1468 if self.op.name_check:
1469 hostname = _CheckHostnameSane(self, new_name)
1470 new_name = self.op.new_name = hostname.name
1471 if (self.op.ip_check and
1472 netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
1473 raise errors.OpPrereqError("IP %s of instance %s already in use" %
1474 (hostname.ip, new_name),
1475 errors.ECODE_NOTUNIQUE)
1476
1477 instance_list = self.cfg.GetInstanceList()
1478 if new_name in instance_list and new_name != instance.name:
1479 raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
1480 new_name, errors.ECODE_EXISTS)
1481
1482 def Exec(self, feedback_fn):
1483 """Rename the instance.
1484
1485 """
1486 inst = self.instance
1487 old_name = inst.name
1488
1489 rename_file_storage = False
1490 if (inst.disk_template in constants.DTS_FILEBASED and
1491 self.op.new_name != inst.name):
1492 old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
1493 rename_file_storage = True
1494
1495 self.cfg.RenameInstance(inst.name, self.op.new_name)
1496 # Change the instance lock. This is definitely safe while we hold the BGL.
1497 # Otherwise the new lock would have to be added in acquired mode.
1498 assert self.REQ_BGL
1499 assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
1500 self.glm.remove(locking.LEVEL_INSTANCE, old_name)
1501 self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
1502
1503 # re-read the instance from the configuration after rename
1504 inst = self.cfg.GetInstanceInfo(self.op.new_name)
1505
1506 if rename_file_storage:
1507 new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
1508 result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
1509 old_file_storage_dir,
1510 new_file_storage_dir)
1511 result.Raise("Could not rename on node %s directory '%s' to '%s'"
1512 " (but the instance has been renamed in Ganeti)" %
1513 (inst.primary_node, old_file_storage_dir,
1514 new_file_storage_dir))
1515
1516 StartInstanceDisks(self, inst, None)
1517 # update info on disks
1518 info = GetInstanceInfoText(inst)
1519 for (idx, disk) in enumerate(inst.disks):
1520 for node in inst.all_nodes:
1521 self.cfg.SetDiskID(disk, node)
1522 result = self.rpc.call_blockdev_setinfo(node, disk, info)
1523 if result.fail_msg:
1524 self.LogWarning("Error setting info on node %s for disk %s: %s",
1525 node, idx, result.fail_msg)
1526 try:
1527 result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
1528 old_name, self.op.debug_level)
1529 msg = result.fail_msg
1530 if msg:
1531 msg = ("Could not run OS rename script for instance %s on node %s"
1532 " (but the instance has been renamed in Ganeti): %s" %
1533 (inst.name, inst.primary_node, msg))
1534 self.LogWarning(msg)
1535 finally:
1536 ShutdownInstanceDisks(self, inst)
1537
1538 return inst.name
1539
1540
1541 class LUInstanceRemove(LogicalUnit):
1542 """Remove an instance.
1543
1544 """
1545 HPATH = "instance-remove"
1546 HTYPE = constants.HTYPE_INSTANCE
1547 REQ_BGL = False
1548
1549 def ExpandNames(self):
1550 self._ExpandAndLockInstance()
1551 self.needed_locks[locking.LEVEL_NODE] = []
1552 self.needed_locks[locking.LEVEL_NODE_RES] = []
1553 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
1554
1555 def DeclareLocks(self, level):
1556 if level == locking.LEVEL_NODE:
1557 self._LockInstancesNodes()
1558 elif level == locking.LEVEL_NODE_RES:
1559 # Copy node locks
1560 self.needed_locks[locking.LEVEL_NODE_RES] = \
1561 CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1562
1563 def BuildHooksEnv(self):
1564 """Build hooks env.
1565
1566 This runs on master, primary and secondary nodes of the instance.
1567
1568 """
1569 env = BuildInstanceHookEnvByObject(self, self.instance)
1570 env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
1571 return env
1572
1573 def BuildHooksNodes(self):
1574 """Build hooks nodes.
1575
1576 """
1577 nl = [self.cfg.GetMasterNode()]
1578 nl_post = list(self.instance.all_nodes) + nl
1579 return (nl, nl_post)
1580
1581 def CheckPrereq(self):
1582 """Check prerequisites.
1583
1584 This checks that the instance is in the cluster.
1585
1586 """
1587 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
1588 assert self.instance is not None, \
1589 "Cannot retrieve locked instance %s" % self.op.instance_name
1590
1591 def Exec(self, feedback_fn):
1592 """Remove the instance.
1593
1594 """
1595 instance = self.instance
1596 logging.info("Shutting down instance %s on node %s",
1597 instance.name, instance.primary_node)
1598
1599 result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
1600 self.op.shutdown_timeout,
1601 self.op.reason)
1602 msg = result.fail_msg
1603 if msg:
1604 if self.op.ignore_failures:
1605 feedback_fn("Warning: can't shutdown instance: %s" % msg)
1606 else:
1607 raise errors.OpExecError("Could not shutdown instance %s on"
1608 " node %s: %s" %
1609 (instance.name, instance.primary_node, msg))
1610
1611 assert (self.owned_locks(locking.LEVEL_NODE) ==
1612 self.owned_locks(locking.LEVEL_NODE_RES))
1613 assert not (set(instance.all_nodes) -
1614 self.owned_locks(locking.LEVEL_NODE)), \
1615 "Not owning correct locks"
1616
1617 RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)
1618
1619
1620 class LUInstanceMove(LogicalUnit):
1621 """Move an instance by data-copying.
1622
1623 """
1624 HPATH = "instance-move"
1625 HTYPE = constants.HTYPE_INSTANCE
1626 REQ_BGL = False
1627
1628 def ExpandNames(self):
1629 self._ExpandAndLockInstance()
1630 target_node = ExpandNodeName(self.cfg, self.op.target_node)
1631 self.op.target_node = target_node
1632 self.needed_locks[locking.LEVEL_NODE] = [target_node]
1633 self.needed_locks[locking.LEVEL_NODE_RES] = []
1634 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
1635
1636 def DeclareLocks(self, level):
1637 if level == locking.LEVEL_NODE:
1638 self._LockInstancesNodes(primary_only=True)
1639 elif level == locking.LEVEL_NODE_RES:
1640 # Copy node locks
1641 self.needed_locks[locking.LEVEL_NODE_RES] = \
1642 CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1643
1644 def BuildHooksEnv(self):
1645 """Build hooks env.
1646
1647 This runs on master, primary and secondary nodes of the instance.
1648
1649 """
1650 env = {
1651 "TARGET_NODE": self.op.target_node,
1652 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
1653 }
1654 env.update(BuildInstanceHookEnvByObject(self, self.instance))
1655 return env
1656
1657 def BuildHooksNodes(self):
1658 """Build hooks nodes.
1659
1660 """
1661 nl = [
1662 self.cfg.GetMasterNode(),
1663 self.instance.primary_node,
1664 self.op.target_node,
1665 ]
1666 return (nl, nl)
1667
1668 def CheckPrereq(self):
1669 """Check prerequisites.
1670
1671 This checks that the instance is in the cluster.
1672
1673 """
1674 self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
1675 assert self.instance is not None, \
1676 "Cannot retrieve locked instance %s" % self.op.instance_name
1677
1678 if instance.disk_template not in constants.DTS_COPYABLE:
1679 raise errors.OpPrereqError("Disk template %s not suitable for copying" %
1680 instance.disk_template, errors.ECODE_STATE)
1681
1682 node = self.cfg.GetNodeInfo(self.op.target_node)
1683 assert node is not None, \
1684 "Cannot retrieve locked node %s" % self.op.target_node
1685
1686 self.target_node = target_node = node.name
1687
1688 if target_node == instance.primary_node:
1689 raise errors.OpPrereqError("Instance %s is already on the node %s" %
1690 (instance.name, target_node),
1691 errors.ECODE_STATE)
1692
1693 bep = self.cfg.GetClusterInfo().FillBE(instance)
1694
1695 for idx, dsk in enumerate(instance.disks):
1696 if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
1697 raise errors.OpPrereqError("Instance disk %d has a complex layout,"
1698 " cannot copy" % idx, errors.ECODE_STATE)
1699
1700 CheckNodeOnline(self, target_node)
1701 CheckNodeNotDrained(self, target_node)
1702 CheckNodeVmCapable(self, target_node)
1703 cluster = self.cfg.GetClusterInfo()
1704 group_info = self.cfg.GetNodeGroup(node.group)
1705 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1706 CheckTargetNodeIPolicy(self, ipolicy, instance, node, self.cfg,
1707 ignore=self.op.ignore_ipolicy)
1708
1709 if instance.admin_state == constants.ADMINST_UP:
1710 # check memory requirements on the target node
1711 CheckNodeFreeMemory(self, target_node,
1712 "moving instance %s" %
1713 instance.name, bep[constants.BE_MAXMEM],
1714 instance.hypervisor)
1715 else:
1716 self.LogInfo("Not checking memory on the target node as"
1717 " the instance will not be started")
1718
1719 # check bridge existence
1720 CheckInstanceBridgesExist(self, instance, node=target_node)
1721
1722 def Exec(self, feedback_fn):
1723 """Move an instance.
1724
1725 The move is done by shutting it down on its present node, copying
1726 the data over (slow) and starting it on the new node.
1727
1728 """
1729 instance = self.instance
1730
1731 source_node = instance.primary_node
1732 target_node = self.target_node
1733
1734 self.LogInfo("Shutting down instance %s on source node %s",
1735 instance.name, source_node)
1736
1737 assert (self.owned_locks(locking.LEVEL_NODE) ==
1738 self.owned_locks(locking.LEVEL_NODE_RES))
1739
1740 result = self.rpc.call_instance_shutdown(source_node, instance,
1741 self.op.shutdown_timeout,
1742 self.op.reason)
1743 msg = result.fail_msg
1744 if msg:
1745 if self.op.ignore_consistency:
1746 self.LogWarning("Could not shutdown instance %s on node %s."
1747 " Proceeding anyway. Please make sure node"
1748 " %s is down. Error details: %s",
1749 instance.name, source_node, source_node, msg)
1750 else:
1751 raise errors.OpExecError("Could not shutdown instance %s on"
1752 " node %s: %s" %
1753 (instance.name, source_node, msg))
1754
1755 # create the target disks
1756 try:
1757 CreateDisks(self, instance, target_node=target_node)
1758 except errors.OpExecError:
1759 self.LogWarning("Device creation failed")
1760 self.cfg.ReleaseDRBDMinors(instance.name)
1761 raise
1762
1763 cluster_name = self.cfg.GetClusterInfo().cluster_name
1764
1765 errs = []
1766 # activate, get path, copy the data over
1767 for idx, disk in enumerate(instance.disks):
1768 self.LogInfo("Copying data for disk %d", idx)
1769 result = self.rpc.call_blockdev_assemble(target_node, (disk, instance),
1770 instance.name, True, idx)
1771 if result.fail_msg:
1772 self.LogWarning("Can't assemble newly created disk %d: %s",
1773 idx, result.fail_msg)
1774 errs.append(result.fail_msg)
1775 break
1776 dev_path = result.payload
1777 result = self.rpc.call_blockdev_export(source_node, (disk, instance),
1778 target_node, dev_path,
1779 cluster_name)
1780 if result.fail_msg:
1781 self.LogWarning("Can't copy data over for disk %d: %s",
1782 idx, result.fail_msg)
1783 errs.append(result.fail_msg)
1784 break
1785
1786 if errs:
1787 self.LogWarning("Some disks failed to copy, aborting")
1788 try:
1789 RemoveDisks(self, instance, target_node=target_node)
1790 finally:
1791 self.cfg.ReleaseDRBDMinors(instance.name)
1792 raise errors.OpExecError("Errors during disk copy: %s" %
1793 (",".join(errs),))
1794
1795 instance.primary_node = target_node
1796 self.cfg.Update(instance, feedback_fn)
1797
1798 self.LogInfo("Removing the disks on the original node")
1799 RemoveDisks(self, instance, target_node=source_node)
1800
1801 # Only start the instance if it's marked as up
1802 if instance.admin_state == constants.ADMINST_UP:
1803 self.LogInfo("Starting instance %s on node %s",
1804 instance.name, target_node)
1805
1806 disks_ok, _ = AssembleInstanceDisks(self, instance,
1807 ignore_secondaries=True)
1808 if not disks_ok:
1809 ShutdownInstanceDisks(self, instance)
1810 raise errors.OpExecError("Can't activate the instance's disks")
1811
1812 result = self.rpc.call_instance_start(target_node,
1813 (instance, None, None), False,
1814 self.op.reason)
1815 msg = result.fail_msg
1816 if msg:
1817 ShutdownInstanceDisks(self, instance)
1818 raise errors.OpExecError("Could not start instance %s on node %s: %s" %
1819 (instance.name, target_node, msg))
1820
1821
1822 class LUInstanceMultiAlloc(NoHooksLU):
1823 """Allocates multiple instances at the same time.
1824
1825 """
1826 REQ_BGL = False
1827
1828 def CheckArguments(self):
1829 """Check arguments.
1830
1831 """
1832 nodes = []
1833 for inst in self.op.instances:
1834 if inst.iallocator is not None:
1835 raise errors.OpPrereqError("iallocator is not allowed to be set on"
1836 " instance objects", errors.ECODE_INVAL)
1837 nodes.append(bool(inst.pnode))
1838 if inst.disk_template in constants.DTS_INT_MIRROR:
1839 nodes.append(bool(inst.snode))
1840
1841 has_nodes = compat.any(nodes)
1842 if compat.all(nodes) ^ has_nodes:
1843 raise errors.OpPrereqError("There are instance objects providing"
1844 " pnode/snode while others do not",
1845 errors.ECODE_INVAL)
1846
1847 if not has_nodes and self.op.iallocator is None:
1848 default_iallocator = self.cfg.GetDefaultIAllocator()
1849 if default_iallocator:
1850 self.op.iallocator = default_iallocator
1851 else:
1852 raise errors.OpPrereqError("No iallocator or nodes on the instances"
1853 " given and no cluster-wide default"
1854 " iallocator found; please specify either"
1855 " an iallocator or nodes on the instances"
1856 " or set a cluster-wide default iallocator",
1857 errors.ECODE_INVAL)
1858
1859 _CheckOpportunisticLocking(self.op)
1860
1861 dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
1862 if dups:
1863 raise errors.OpPrereqError("There are duplicate instance names: %s" %
1864 utils.CommaJoin(dups), errors.ECODE_INVAL)
1865
1866 def ExpandNames(self):
1867 """Calculate the locks.
1868
1869 """
1870 self.share_locks = ShareAll()
1871 self.needed_locks = {
1872 # iallocator will select nodes and even if no iallocator is used,
1873 # collisions with LUInstanceCreate should be avoided
1874 locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
1875 }
1876
1877 if self.op.iallocator:
1878 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
1879 self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
1880
1881 if self.op.opportunistic_locking:
1882 self.opportunistic_locks[locking.LEVEL_NODE] = True
1883 else:
1884 nodeslist = []
1885 for inst in self.op.instances:
1886 inst.pnode = ExpandNodeName(self.cfg, inst.pnode)
1887 nodeslist.append(inst.pnode)
1888 if inst.snode is not None:
1889 inst.snode = ExpandNodeName(self.cfg, inst.snode)
1890 nodeslist.append(inst.snode)
1891
1892 self.needed_locks[locking.LEVEL_NODE] = nodeslist
1893 # Lock resources of instance's primary and secondary nodes (copy to
1894 # prevent accidental modification)
1895 self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)
1896
1897 def DeclareLocks(self, level):
1898 if level == locking.LEVEL_NODE_RES and \
1899 self.opportunistic_locks[locking.LEVEL_NODE]:
1900 # Even when using opportunistic locking, we require the same set of
1901 # NODE_RES locks as we got NODE locks
1902 self.needed_locks[locking.LEVEL_NODE_RES] = \
1903 self.owned_locks(locking.LEVEL_NODE)
1904
1905 def CheckPrereq(self):
1906 """Check prerequisite.
1907
1908 """
1909 if self.op.iallocator:
1910 cluster = self.cfg.GetClusterInfo()
1911 default_vg = self.cfg.GetVGName()
1912 ec_id = self.proc.GetECId()
1913
1914 if self.op.opportunistic_locking:
1915 # Only consider nodes for which a lock is held
1916 node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
1917 else:
1918 node_whitelist = None
1919
1920 insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
1921 _ComputeNics(op, cluster, None,
1922 self.cfg, ec_id),
1923 _ComputeFullBeParams(op, cluster),
1924 node_whitelist)
1925 for op in self.op.instances]
1926
1927 req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
1928 ial = iallocator.IAllocator(self.cfg, self.rpc, req)
1929
1930 ial.Run(self.op.iallocator)
1931
1932 if not ial.success:
1933 raise errors.OpPrereqError("Can't compute nodes using"
1934 " iallocator '%s': %s" %
1935 (self.op.iallocator, ial.info),
1936 errors.ECODE_NORES)
1937
1938 self.ia_result = ial.result
1939
1940 if self.op.dry_run:
1941 self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
1942 constants.JOB_IDS_KEY: [],
1943 })
1944
1945 def _ConstructPartialResult(self):
1946 """Contructs the partial result.
1947
1948 """
1949 if self.op.iallocator:
1950 (allocatable, failed_insts) = self.ia_result
1951 allocatable_insts = map(compat.fst, allocatable)
1952 else:
1953 allocatable_insts = [op.instance_name for op in self.op.instances]
1954 failed_insts = []
1955
1956 return {
1957 opcodes.OpInstanceMultiAlloc.ALLOCATABLE_KEY: allocatable_insts,
1958 opcodes.OpInstanceMultiAlloc.FAILED_KEY: failed_insts,
1959 }
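# Illustrative sketch: with an iallocator, self.ia_result is a pair
# (allocatable, failed) where each allocatable entry is (instance_name,
# nodes); assuming ALLOCATABLE_KEY/FAILED_KEY map to "allocatable"/"failed",
# the partial result then looks roughly like
#   {"allocatable": ["inst1.example.com"], "failed": ["inst2.example.com"]}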
1960
1961 def Exec(self, feedback_fn):
1962 """Executes the opcode.
1963
1964 """
1965 jobs = []
1966 if self.op.iallocator:
1967 op2inst = dict((op.instance_name, op) for op in self.op.instances)
1968 (allocatable, failed) = self.ia_result
1969
1970 for (name, nodes) in allocatable:
1971 op = op2inst.pop(name)
1972
1973 if len(nodes) > 1:
1974 (op.pnode, op.snode) = nodes
1975 else:
1976 (op.pnode,) = nodes
1977
1978 jobs.append([op])
1979
1980 missing = set(op2inst.keys()) - set(failed)
1981 assert not missing, \
1982 "Iallocator did return incomplete result: %s" % \
1983 utils.CommaJoin(missing)
1984 else:
1985 jobs.extend([op] for op in self.op.instances)
1986
1987 return ResultWithJobs(jobs, **self._ConstructPartialResult())
1988
1989
1990 class _InstNicModPrivate:
1991 """Data structure for network interface modifications.
1992
1993 Used by L{LUInstanceSetParams}.
1994
1995 """
1996 def __init__(self):
1997 self.params = None
1998 self.filled = None
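# Usage note (based on _PrepareNicModification below): "params" receives the
# updated nicparams dict and "filled" the same dict with cluster defaults
# applied via SimpleFillNIC, for use when the NIC changes are applied.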
1999
2000
2001 def _PrepareContainerMods(mods, private_fn):
2002 """Prepares a list of container modifications by adding a private data field.
2003
2004 @type mods: list of tuples; (operation, index, parameters)
2005 @param mods: List of modifications
2006 @type private_fn: callable or None
2007 @param private_fn: Callable for constructing a private data field for a
2008 modification
2009 @rtype: list
2010
2011 """
2012 if private_fn is None:
2013 fn = lambda: None
2014 else:
2015 fn = private_fn
2016
2017 return [(op, idx, params, fn()) for (op, idx, params) in mods]
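# Illustrative example (mirroring the call in LUInstanceSetParams.CheckPrereq):
#   _PrepareContainerMods([(constants.DDM_ADD, -1,
#                           {constants.INIC_MAC: constants.VALUE_AUTO})],
#                         _InstNicModPrivate)
# returns the same tuples with a fresh _InstNicModPrivate appended to each;
# a private_fn of None appends None instead.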
2018
2019
2020 def _CheckNodesPhysicalCPUs(lu, nodenames, requested, hypervisor_name):
2021 """Checks if nodes have enough physical CPUs
2022
2023 This function checks if all given nodes have the needed number of
2024 physical CPUs. In case any node has less CPUs or we cannot get the
2025 information from the node, this function raises an OpPrereqError
2026 exception.
2027
2028 @type lu: C{LogicalUnit}
2029 @param lu: a logical unit from which we get configuration data
2030 @type nodenames: C{list}
2031 @param nodenames: the list of node names to check
2032 @type requested: C{int}
2033 @param requested: the minimum acceptable number of physical CPUs
2034 @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
2035 or we cannot check the node
2036
2037 """
2038 nodeinfo = lu.rpc.call_node_info(nodenames, None, [hypervisor_name], None)
2039 for node in nodenames:
2040 info = nodeinfo[node]
2041 info.Raise("Cannot get current information from node %s" % node,
2042 prereq=True, ecode=errors.ECODE_ENVIRON)
2043 (_, _, (hv_info, )) = info.payload
2044 num_cpus = hv_info.get("cpu_total", None)
2045 if not isinstance(num_cpus, int):
2046 raise errors.OpPrereqError("Can't compute the number of physical CPUs"
2047 " on node %s, result was '%s'" %
2048 (node, num_cpus), errors.ECODE_ENVIRON)
2049 if requested > num_cpus:
2050 raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
2051 "required" % (node, num_cpus, requested),
2052 errors.ECODE_NORES)
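# Usage sketch (mirroring the call in LUInstanceSetParams.CheckPrereq): when
# a new CPU mask is given, the largest CPU index in the mask plus one is
# passed as "requested", e.g.
#   _CheckNodesPhysicalCPUs(self, instance.all_nodes, max_requested_cpu + 1,
#                           instance.hypervisor)
# and an OpPrereqError with ECODE_NORES is raised if any node has fewer
# physical CPUs.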
2053
2054
2055 def GetItemFromContainer(identifier, kind, container):
2056 """Return the item refered by the identifier.
2057
2058 @type identifier: string
2059 @param identifier: Item index or name or UUID
2060 @type kind: string
2061 @param kind: One-word item description
2062 @type container: list
2063 @param container: Container to get the item from
2064
2065 """
2066 # Index
2067 try:
2068 idx = int(identifier)
2069 if idx == -1:
2070 # Append
2071 absidx = len(container) - 1
2072 elif idx < 0:
2073 raise IndexError("Not accepting negative indices other than -1")
2074 elif idx > len(container):
2075 raise IndexError("Got %s index %s, but there are only %s" %
2076 (kind, idx, len(container)))
2077 else:
2078 absidx = idx
2079 return (absidx, container[idx])
2080 except ValueError:
2081 pass
2082
2083 for idx, item in enumerate(container):
2084 if item.uuid == identifier or item.name == identifier:
2085 return (idx, item)
2086
2087 raise errors.OpPrereqError("Cannot find %s with identifier %s" %
2088 (kind, identifier), errors.ECODE_NOENT)
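# Examples (illustrative, assuming "nics" is a list of three NIC objects):
#   GetItemFromContainer("0", "nic", nics)  returns (0, nics[0])
#   GetItemFromContainer("-1", "nic", nics) returns (2, nics[-1]), i.e. the
#     last item; any non-integer identifier such as "eth0" falls through to
#     the linear search and is matched against each item's name or uuid.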
2089
2090
2091 def _ApplyContainerMods(kind, container, chgdesc, mods,
2092 create_fn, modify_fn, remove_fn):
2093 """Applies descriptions in C{mods} to C{container}.
2094
2095 @type kind: string
2096 @param kind: One-word item description
2097 @type container: list
2098 @param container: Container to modify
2099 @type chgdesc: None or list
2100 @param chgdesc: List of applied changes
2101 @type mods: list
2102 @param mods: Modifications as returned by L{_PrepareContainerMods}
2103 @type create_fn: callable
2104 @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
2105 receives absolute item index, parameters and private data object as added
2106 by L{_PrepareContainerMods}, returns tuple containing new item and changes
2107 as list
2108 @type modify_fn: callable
2109 @param modify_fn: Callback for modifying an existing item
2110 (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
2111 and private data object as added by L{_PrepareContainerMods}, returns
2112 changes as list
2113 @type remove_fn: callable
2114 @param remove_fn: Callback on removing item; receives absolute item index,
2115 item and private data object as added by L{_PrepareContainerMods}
2116
2117 """
2118 for (op, identifier, params, private) in mods:
2119 changes = None
2120
2121 if op == constants.DDM_ADD:
2122 # Calculate where item will be added
2123 # When adding an item, identifier can only be an index
2124 try:
2125 idx = int(identifier)
2126 except ValueError:
2127 raise errors.OpPrereqError("Only possitive integer or -1 is accepted as"
2128 " identifier for %s" % constants.DDM_ADD,
2129 errors.ECODE_INVAL)
2130 if idx == -1:
2131 addidx = len(container)
2132 else:
2133 if idx < 0:
2134 raise IndexError("Not accepting negative indices other than -1")
2135 elif idx > len(container):
2136 raise IndexError("Got %s index %s, but there are only %s" %
2137 (kind, idx, len(container)))
2138 addidx = idx
2139
2140 if create_fn is None:
2141 item = params
2142 else:
2143 (item, changes) = create_fn(addidx, params, private)
2144
2145 if idx == -1:
2146 container.append(item)
2147 else:
2148 assert idx >= 0
2149 assert idx <= len(container)
2150 # list.insert does so before the specified index
2151 container.insert(idx, item)
2152 else:
2153 # Retrieve existing item
2154 (absidx, item) = GetItemFromContainer(identifier, kind, container)
2155
2156 if op == constants.DDM_REMOVE:
2157 assert not params
2158
2159 if remove_fn is not None:
2160 remove_fn(absidx, item, private)
2161
2162 changes = [("%s/%s" % (kind, absidx), "remove")]
2163
2164 assert container[absidx] == item
2165 del container[absidx]
2166 elif op == constants.DDM_MODIFY:
2167 if modify_fn is not None:
2168 changes = modify_fn(absidx, item, params, private)
2169 else:
2170 raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2171
2172 assert _TApplyContModsCbChanges(changes)
2173
2174 if not (chgdesc is None or changes is None):
2175 chgdesc.extend(changes)
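# Worked sketch: applying
#   mods = _PrepareContainerMods([(constants.DDM_MODIFY, "eth0",
#                                  {constants.INIC_IP: "none"})], None)
#   _ApplyContainerMods("NIC", nics, chgdesc, mods, None, modify_fn, None)
# looks up the NIC named "eth0" via GetItemFromContainer, calls
# modify_fn(absidx, item, params, None) and extends chgdesc with the changes
# it returns; a DDM_ADD with identifier -1 appends to the container instead.
# (modify_fn here stands for any callback with the documented signature.)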
2176
2177
2178 def _UpdateIvNames(base_index, disks):
2179 """Updates the C{iv_name} attribute of disks.
2180
2181 @type disks: list of L{objects.Disk}
2182
2183 """
2184 for (idx, disk) in enumerate(disks):
2185 disk.iv_name = "disk/%s" % (base_index + idx, )
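# Example: _UpdateIvNames(0, instance.disks) renames the disks to "disk/0",
# "disk/1", ...; a non-zero base_index such as 2 continues the numbering at
# "disk/2", e.g. for disks appended after the existing ones.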
2186
2187
2188 class LUInstanceSetParams(LogicalUnit):
2189 """Modifies an instances's parameters.
2190
2191 """
2192 HPATH = "instance-modify"
2193 HTYPE = constants.HTYPE_INSTANCE
2194 REQ_BGL = False
2195
2196 @staticmethod
2197 def _UpgradeDiskNicMods(kind, mods, verify_fn):
2198 assert ht.TList(mods)
2199 assert not mods or len(mods[0]) in (2, 3)
2200
2201 if mods and len(mods[0]) == 2:
2202 result = []
2203
2204 addremove = 0
2205 for op, params in mods:
2206 if op in (constants.DDM_ADD, constants.DDM_REMOVE):
2207 result.append((op, -1, params))
2208 addremove += 1
2209
2210 if addremove > 1:
2211 raise errors.OpPrereqError("Only one %s add or remove operation is"
2212 " supported at a time" % kind,
2213 errors.ECODE_INVAL)
2214 else:
2215 result.append((constants.DDM_MODIFY, op, params))
2216
2217 assert verify_fn(result)
2218 else:
2219 result = mods
2220
2221 return result
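# Conversion example (illustrative): the legacy two-element format
#   [(constants.DDM_ADD, {constants.IDISK_SIZE: 1024})]
# becomes [(constants.DDM_ADD, -1, {constants.IDISK_SIZE: 1024})], while a
# legacy modify entry such as (0, {constants.IDISK_NAME: "data"}) becomes
# (constants.DDM_MODIFY, 0, {constants.IDISK_NAME: "data"}).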
2222
2223 @staticmethod
2224 def _CheckMods(kind, mods, key_types, item_fn):
2225 """Ensures requested disk/NIC modifications are valid.
2226
2227 """
2228 for (op, _, params) in mods:
2229 assert ht.TDict(params)
2230
2231 # If 'key_types' is an empty dict, we assume we have an
2232 # 'ext' template and thus do not ForceDictType
2233 if key_types:
2234 utils.ForceDictType(params, key_types)
2235
2236 if op == constants.DDM_REMOVE:
2237 if params:
2238 raise errors.OpPrereqError("No settings should be passed when"
2239 " removing a %s" % kind,
2240 errors.ECODE_INVAL)
2241 elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
2242 item_fn(op, params)
2243 else:
2244 raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2245
2246 @staticmethod
2247 def _VerifyDiskModification(op, params):
2248 """Verifies a disk modification.
2249
2250 """
2251 if op == constants.DDM_ADD:
2252 mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
2253 if mode not in constants.DISK_ACCESS_SET:
2254 raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
2255 errors.ECODE_INVAL)
2256
2257 size = params.get(constants.IDISK_SIZE, None)
2258 if size is None:
2259 raise errors.OpPrereqError("Required disk parameter '%s' missing" %
2260 constants.IDISK_SIZE, errors.ECODE_INVAL)
2261
2262 try:
2263 size = int(size)
2264 except (TypeError, ValueError), err:
2265 raise errors.OpPrereqError("Invalid disk size parameter: %s" % err,
2266 errors.ECODE_INVAL)
2267
2268 params[constants.IDISK_SIZE] = size
2269 name = params.get(constants.IDISK_NAME, None)
2270 if name is not None and name.lower() == constants.VALUE_NONE:
2271 params[constants.IDISK_NAME] = None
2272
2273 elif op == constants.DDM_MODIFY:
2274 if constants.IDISK_SIZE in params:
2275 raise errors.OpPrereqError("Disk size change not possible, use"
2276 " grow-disk", errors.ECODE_INVAL)
2277 if len(params) > 2:
2278 raise errors.OpPrereqError("Disk modification doesn't support"
2279 " additional arbitrary parameters",
2280 errors.ECODE_INVAL)
2281 name = params.get(constants.IDISK_NAME, None)
2282 if name is not None and name.lower() == constants.VALUE_NONE:
2283 params[constants.IDISK_NAME] = None
2284
2285 @staticmethod
2286 def _VerifyNicModification(op, params):
2287 """Verifies a network interface modification.
2288
2289 """
2290 if op in (constants.DDM_ADD, constants.DDM_MODIFY):
2291 ip = params.get(constants.INIC_IP, None)
2292 name = params.get(constants.INIC_NAME, None)
2293 req_net = params.get(constants.INIC_NETWORK, None)
2294 link = params.get(constants.NIC_LINK, None)
2295 mode = params.get(constants.NIC_MODE, None)
2296 if name is not None and name.lower() == constants.VALUE_NONE:
2297 params[constants.INIC_NAME] = None
2298 if req_net is not None:
2299 if req_net.lower() == constants.VALUE_NONE:
2300 params[constants.INIC_NETWORK] = None
2301 req_net = None
2302 elif link is not None or mode is not None:
2303 raise errors.OpPrereqError("If network is given"
2304 " mode or link should not",
2305 errors.ECODE_INVAL)
2306
2307 if op == constants.DDM_ADD:
2308 macaddr = params.get(constants.INIC_MAC, None)
2309 if macaddr is None:
2310 params[constants.INIC_MAC] = constants.VALUE_AUTO
2311
2312 if ip is not None:
2313 if ip.lower() == constants.VALUE_NONE:
2314 params[constants.INIC_IP] = None
2315 else:
2316 if ip.lower() == constants.NIC_IP_POOL:
2317 if op == constants.DDM_ADD and req_net is None:
2318 raise errors.OpPrereqError("If ip=pool, parameter network"
2319 " cannot be none",
2320 errors.ECODE_INVAL)
2321 else:
2322 if not netutils.IPAddress.IsValid(ip):
2323 raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
2324 errors.ECODE_INVAL)
2325
2326 if constants.INIC_MAC in params:
2327 macaddr = params[constants.INIC_MAC]
2328 if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2329 macaddr = utils.NormalizeAndValidateMac(macaddr)
2330
2331 if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
2332 raise errors.OpPrereqError("'auto' is not a valid MAC address when"
2333 " modifying an existing NIC",
2334 errors.ECODE_INVAL)
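# Normalisation example (illustrative): for a DDM_ADD operation the params
#   {constants.INIC_IP: "none", constants.INIC_NAME: "none"}
# come out with INIC_IP=None, INIC_NAME=None and INIC_MAC set to
# constants.VALUE_AUTO, i.e. the literal string "none" is turned into None
# and a missing MAC address defaults to "auto" so one is generated later.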
2335
2336 def CheckArguments(self):
2337 if not (self.op.nics or self.op.disks or self.op.disk_template or
2338 self.op.hvparams or self.op.beparams or self.op.os_name or
2339 self.op.osparams or self.op.offline is not None or
2340 self.op.runtime_mem or self.op.pnode):
2341 raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
2342
2343 if self.op.hvparams:
2344 CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
2345 "hypervisor", "instance", "cluster")
2346
2347 self.op.disks = self._UpgradeDiskNicMods(
2348 "disk", self.op.disks, opcodes.OpInstanceSetParams.TestDiskModifications)
2349 self.op.nics = self._UpgradeDiskNicMods(
2350 "NIC", self.op.nics, opcodes.OpInstanceSetParams.TestNicModifications)
2351
2352 if self.op.disks and self.op.disk_template is not None:
2353 raise errors.OpPrereqError("Disk template conversion and other disk"
2354 " changes not supported at the same time",
2355 errors.ECODE_INVAL)
2356
2357 if (self.op.disk_template and
2358 self.op.disk_template in constants.DTS_INT_MIRROR and
2359 self.op.remote_node is None):
2360 raise errors.OpPrereqError("Changing the disk template to a mirrored"
2361 " one requires specifying a secondary node",
2362 errors.ECODE_INVAL)
2363
2364 # Check NIC modifications
2365 self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
2366 self._VerifyNicModification)
2367
2368 if self.op.pnode:
2369 self.op.pnode = ExpandNodeName(self.cfg, self.op.pnode)
2370
2371 def ExpandNames(self):
2372 self._ExpandAndLockInstance()
2373 self.needed_locks[locking.LEVEL_NODEGROUP] = []
2374 # Can't even acquire node locks in shared mode as upcoming changes in
2375 # Ganeti 2.6 will start to modify the node object on disk conversion
2376 self.needed_locks[locking.LEVEL_NODE] = []
2377 self.needed_locks[locking.LEVEL_NODE_RES] = []
2378 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2379 # Lock the node group (shared) to look up the ipolicy
2380 self.share_locks[locking.LEVEL_NODEGROUP] = 1
2381
2382 def DeclareLocks(self, level):
2383 if level == locking.LEVEL_NODEGROUP:
2384 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
2385 # Acquire locks for the instance's nodegroups optimistically. Needs
2386 # to be verified in CheckPrereq
2387 self.needed_locks[locking.LEVEL_NODEGROUP] = \
2388 self.cfg.GetInstanceNodeGroups(self.op.instance_name)
2389 elif level == locking.LEVEL_NODE:
2390 self._LockInstancesNodes()
2391 if self.op.disk_template and self.op.remote_node:
2392 self.op.remote_node = ExpandNodeName(self.cfg, self.op.remote_node)
2393 self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node)
2394 elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
2395 # Copy node locks
2396 self.needed_locks[locking.LEVEL_NODE_RES] = \
2397 CopyLockList(self.needed_locks[locking.LEVEL_NODE])
2398
2399 def BuildHooksEnv(self):
2400 """Build hooks env.
2401
2402 This runs on the master, primary and secondaries.
2403
2404 """
2405 args = {}
2406 if constants.BE_MINMEM in self.be_new:
2407 args["minmem"] = self.be_new[constants.BE_MINMEM]
2408 if constants.BE_MAXMEM in self.be_new:
2409 args["maxmem"] = self.be_new[constants.BE_MAXMEM]
2410 if constants.BE_VCPUS in self.be_new:
2411 args["vcpus"] = self.be_new[constants.BE_VCPUS]
2412 # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
2413 # information at all.
2414
2415 if self._new_nics is not None:
2416 nics = []
2417
2418 for nic in self._new_nics:
2419 n = copy.deepcopy(nic)
2420 nicparams = self.cluster.SimpleFillNIC(n.nicparams)
2421 n.nicparams = nicparams
2422 nics.append(NICToTuple(self, n))
2423
2424 args["nics"] = nics
2425
2426 env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
2427 if self.op.disk_template:
2428 env["NEW_DISK_TEMPLATE"] = self.op.disk_template
2429 if self.op.runtime_mem:
2430 env["RUNTIME_MEMORY"] = self.op.runtime_mem
2431
2432 return env
2433
2434 def BuildHooksNodes(self):
2435 """Build hooks nodes.
2436
2437 """
2438 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2439 return (nl, nl)
2440
2441 def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
2442 old_params, cluster, pnode):
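"""Prepares a NIC modification.

Validates the requested network, link, mode, MAC and IP settings against
the cluster configuration, reserving or releasing MAC and IP addresses as
needed, and stores the resulting parameter dicts in C{private.params} and
C{private.filled}.

"""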
2443
2444 update_params_dict = dict([(key, params[key])
2445 for key in constants.NICS_PARAMETERS
2446 if key in params])
2447
2448 req_link = update_params_dict.get(constants.NIC_LINK, None)
2449 req_mode = update_params_dict.get(constants.NIC_MODE, None)
2450
2451 new_net_uuid = None
2452 new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
2453 if new_net_uuid_or_name:
2454 new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
2455 new_net_obj = self.cfg.GetNetwork(new_net_uuid)
2456
2457 if old_net_uuid:
2458 old_net_obj = self.cfg.GetNetwork(old_net_uuid)
2459
2460 if new_net_uuid:
2461 netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode)
2462 if not netparams:
2463 raise errors.OpPrereqError("No netparams found for the network"
2464 " %s, probably not connected" %
2465 new_net_obj.name, errors.ECODE_INVAL)
2466 new_params = dict(netparams)
2467 else:
2468 new_params = GetUpdatedParams(old_params, update_params_dict)
2469
2470 utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)
2471
2472 new_filled_params = cluster.SimpleFillNIC(new_params)
2473 objects.NIC.CheckParameterSyntax(new_filled_params)
2474
2475 new_mode = new_filled_params[constants.NIC_MODE]
2476 if new_mode == constants.NIC_MODE_BRIDGED:
2477 bridge = new_filled_params[constants.NIC_LINK]
2478 msg = self.rpc.call_bridges_exist(pnode, [bridge]).fail_msg
2479 if msg:
2480 msg = "Error checking bridges on node '%s': %s" % (pnode, msg)
2481 if self.op.force:
2482 self.warn.append(msg)
2483 else:
2484 raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
2485
2486 elif new_mode == constants.NIC_MODE_ROUTED:
2487 ip = params.get(constants.INIC_IP, old_ip)
2488 if ip is None:
2489 raise errors.OpPrereqError("Cannot set the NIC IP address to None"
2490 " on a routed NIC", errors.ECODE_INVAL)
2491
2492 elif new_mode == constants.NIC_MODE_OVS:
2493 # TODO: check OVS link
2494 self.LogInfo("OVS links are currently not checked for correctness")
2495
2496 if constants.INIC_MAC in params:
2497 mac = params[constants.INIC_MAC]
2498 if mac is None:
2499 raise errors.OpPrereqError("Cannot unset the NIC MAC address",
2500 errors.ECODE_INVAL)
2501 elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2502 # otherwise generate the MAC address
2503 params[constants.INIC_MAC] = \
2504 self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2505 else:
2506 # or validate/reserve the current one
2507 try:
2508 self.cfg.ReserveMAC(mac, self.proc.GetECId())
2509 except errors.ReservationError:
2510 raise errors.OpPrereqError("MAC address '%s' already in use"
2511 " in cluster" % mac,
2512 errors.ECODE_NOTUNIQUE)
2513 elif new_net_uuid != old_net_uuid:
2514
2515 def get_net_prefix(net_uuid):
2516 mac_prefix = None
2517 if net_uuid:
2518 nobj = self.cfg.GetNetwork(net_uuid)
2519 mac_prefix = nobj.mac_prefix
2520
2521 return mac_prefix
2522
2523 new_prefix = get_net_prefix(new_net_uuid)
2524 old_prefix = get_net_prefix(old_net_uuid)
2525 if old_prefix != new_prefix:
2526 params[constants.INIC_MAC] = \
2527 self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2528
2529 # if there is a change in (ip, network) tuple
2530 new_ip = params.get(constants.INIC_IP, old_ip)
2531 if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
2532 if new_ip:
2533 # if IP is pool then require a network and generate one IP
2534 if new_ip.lower() == constants.NIC_IP_POOL:
2535 if new_net_uuid:
2536 try:
2537 new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
2538 except errors.ReservationError:
2539 raise errors.OpPrereqError("Unable to get a free IP"
2540 " from the address pool",
2541 errors.ECODE_STATE)
2542 self.LogInfo("Chose IP %s from network %s",
2543 new_ip,
2544 new_net_obj.name)
2545 params[constants.INIC_IP] = new_ip
2546 else:
2547 raise errors.OpPrereqError("ip=pool, but no network found",
2548 errors.ECODE_INVAL)
2549 # Reserve the new IP in the new network, if any
2550 elif new_net_uuid:
2551 try:
2552 self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId())
2553 self.LogInfo("Reserving IP %s in network %s",
2554 new_ip, new_net_obj.name)
2555 except errors.ReservationError:
2556 raise errors.OpPrereqError("IP %s not available in network %s" %
2557 (new_ip, new_net_obj.name),
2558 errors.ECODE_NOTUNIQUE)
2559 # new network is None so check if new IP is a conflicting IP
2560 elif self.op.conflicts_check:
2561 _CheckForConflictingIp(self, new_ip, pnode)
2562
2563 # release old IP if old network is not None
2564 if old_ip and old_net_uuid:
2565 try:
2566 self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
2567 except errors.AddressPoolError:
2568 logging.warning("Release IP %s not contained in network %s",
2569 old_ip, old_net_obj.name)
2570
2571 # there are no changes in (ip, network) tuple and old network is not None
2572 elif (old_net_uuid is not None and
2573 (req_link is not None or req_mode is not None)):
2574 raise errors.OpPrereqError("Not allowed to change link or mode of"
2575 " a NIC that is connected to a network",
2576 errors.ECODE_INVAL)
2577
2578 private.params = new_params
2579 private.filled = new_filled_params
2580
2581 def _PreCheckDiskTemplate(self, pnode_info):
2582 """CheckPrereq checks related to a new disk template."""
2583 # Arguments are passed to avoid configuration lookups
2584 instance = self.instance
2585 pnode = instance.primary_node
2586 cluster = self.cluster
2587 if instance.disk_template == self.op.disk_template:
2588 raise errors.OpPrereqError("Instance already has disk template %s" %
2589 instance.disk_template, errors.ECODE_INVAL)
2590
2591 if (instance.disk_template,
2592 self.op.disk_template) not in self._DISK_CONVERSIONS:
2593 raise errors.OpPrereqError("Unsupported disk template conversion from"
2594 " %s to %s" % (instance.disk_template,
2595 self.op.disk_template),
2596 errors.ECODE_INVAL)
2597 CheckInstanceState(self, instance, INSTANCE_DOWN,
2598 msg="cannot change disk template")
2599 if self.op.disk_template in constants.DTS_INT_MIRROR:
2600 if self.op.remote_node == pnode:
2601 raise errors.OpPrereqError("Given new secondary node %s is the same"
2602 " as the primary node of the instance" %
2603 self.op.remote_node, errors.ECODE_STATE)
2604 CheckNodeOnline(self, self.op.remote_node)
2605 CheckNodeNotDrained(self, self.op.remote_node)
2606 # FIXME: here we assume that the old instance type is DT_PLAIN
2607 assert instance.disk_template == constants.DT_PLAIN
2608 disks = [{constants.IDISK_SIZE: d.size,
2609 constants.IDISK_VG: d.logical_id[0]}
2610 for d in instance.disks]
2611 required = ComputeDiskSizePerVG(self.op.disk_template, disks)
2612 CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)
2613
2614 snode_info = self.cfg.GetNodeInfo(self.op.remote_node)
2615 snode_group = self.cfg.GetNodeGroup(snode_info.group)
2616 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
2617 snode_group)
2618 CheckTargetNodeIPolicy(self, ipolicy, instance, snode_info, self.cfg,
2619 ignore=self.op.ignore_ipolicy)
2620 if pnode_info.group != snode_info.group:
2621 self.LogWarning("The primary and secondary nodes are in two"
2622 " different node groups; the disk parameters"
2623 " from the first disk's node group will be"
2624 " used")
2625
2626 if self.op.disk_template not in constants.DTS_EXCL_STORAGE:
2627 # Make sure none of the nodes require exclusive storage
2628 nodes = [pnode_info]
2629 if self.op.disk_template in constants.DTS_INT_MIRROR:
2630 assert snode_info
2631 nodes.append(snode_info)
2632 has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
2633 if compat.any(map(has_es, nodes)):
2634 errmsg = ("Cannot convert disk template from %s to %s when exclusive"
2635 " storage is enabled" % (instance.disk_template,
2636 self.op.disk_template))
2637 raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)
2638
2639 def CheckPrereq(self):
2640 """Check prerequisites.
2641
2642 This checks the requested parameter changes for validity.
2643
2644 """
2645 assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
2646 instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2647
2648 cluster = self.cluster = self.cfg.GetClusterInfo()
2649 assert self.instance is not None, \
2650 "Cannot retrieve locked instance %s" % self.op.instance_name
2651
2652 pnode = instance.primary_node
2653
2654 self.warn = []
2655
2656 if (self.op.pnode is not None and self.op.pnode != pnode and
2657 not self.op.force):
2658 # verify that the instance is not up
2659 instance_info = self.rpc.call_instance_info(pnode, instance.name,
2660 instance.hypervisor)
2661 if instance_info.fail_msg:
2662 self.warn.append("Can't get instance runtime information: %s" %
2663 instance_info.fail_msg)
2664 elif instance_info.payload:
2665 raise errors.OpPrereqError("Instance is still running on %s" % pnode,
2666 errors.ECODE_STATE)
2667
2668 assert pnode in self.owned_locks(locking.LEVEL_NODE)
2669 nodelist = list(instance.all_nodes)
2670 pnode_info = self.cfg.GetNodeInfo(pnode)
2671 self.diskparams = self.cfg.GetInstanceDiskParams(instance)
2672
2673 #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
2674 assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
2675 group_info = self.cfg.GetNodeGroup(pnode_info.group)
2676
2677 # dictionary with instance information after the modification
2678 ispec = {}
2679
2680 # Check disk modifications. This is done here and not in CheckArguments
2681 # (as with NICs), because we need to know the instance's disk template
2682 if instance.disk_template == constants.DT_EXT:
2683 self._CheckMods("disk", self.op.disks, {},
2684 self._VerifyDiskModification)
2685 else:
2686 self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
2687 self._VerifyDiskModification)
2688
2689 # Prepare disk/NIC modifications
2690 self.diskmod = _PrepareContainerMods(self.op.disks, None)
2691 self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)
2692
2693 # Check the validity of the `provider' parameter
2694 if instance.disk_template == constants.DT_EXT:
2695 for mod in self.diskmod:
2696 ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
2697 if mod[0] == constants.DDM_ADD:
2698 if ext_provider is None:
2699 raise errors.OpPrereqError("Instance template is '%s' and parameter"
2700 " '%s' missing, during disk add" %
2701 (constants.DT_EXT,
2702 constants.IDISK_PROVIDER),
2703 errors.ECODE_NOENT)
2704 elif mod[0] == constants.DDM_MODIFY:
2705 if ext_provider:
2706 raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
2707 " modification" %
2708 constants.IDISK_PROVIDER,
2709 errors.ECODE_INVAL)
2710 else:
2711 for mod in self.diskmod:
2712 ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
2713 if ext_provider is not None:
2714 raise errors.OpPrereqError("Parameter '%s' is only valid for"
2715 " instances of type '%s'" %
2716 (constants.IDISK_PROVIDER,
2717 constants.DT_EXT),
2718 errors.ECODE_INVAL)
2719
2720 # OS change
2721 if self.op.os_name and not self.op.force:
2722 CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
2723 self.op.force_variant)
2724 instance_os = self.op.os_name
2725 else:
2726 instance_os = instance.os
2727
2728 assert not (self.op.disk_template and self.op.disks), \
2729 "Can't modify disk template and apply disk changes at the same time"
2730
2731 if self.op.disk_template:
2732 self._PreCheckDiskTemplate(pnode_info)
2733
2734 # hvparams processing
2735 if self.op.hvparams:
2736 hv_type = instance.hypervisor
2737 i_hvdict = GetUpdatedParams(instance.hvparams, self.op.hvparams)
2738 utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
2739 hv_new = cluster.SimpleFillHV(hv_type, instance.os, i_hvdict)
2740
2741 # local check
2742 hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
2743 CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
2744 self.hv_proposed = self.hv_new = hv_new # the new actual values
2745 self.hv_inst = i_hvdict # the new dict (without defaults)
2746 else:
2747 self.hv_proposed = cluster.SimpleFillHV(instance.hypervisor, instance.os,
2748 instance.hvparams)
2749 self.hv_new = self.hv_inst = {}
2750
2751 # beparams processing
2752 if self.op.beparams:
2753 i_bedict = GetUpdatedParams(instance.beparams, self.op.beparams,
2754 use_none=True)
2755 objects.UpgradeBeParams(i_bedict)
2756 utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
2757 be_new = cluster.SimpleFillBE(i_bedict)
2758 self.be_proposed = self.be_new = be_new # the new actual values
2759 self.be_inst = i_bedict # the new dict (without defaults)
2760 else:
2761 self.be_new = self.be_inst = {}
2762 self.be_proposed = cluster.SimpleFillBE(instance.beparams)
2763 be_old = cluster.FillBE(instance)
2764
2765 # CPU param validation -- checking every time a parameter is
2766 # changed to cover all cases where either CPU mask or vcpus have
2767 # changed
2768 if (constants.BE_VCPUS in self.be_proposed and
2769 constants.HV_CPU_MASK in self.hv_proposed):
2770 cpu_list = \
2771 utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
2772 # Verify mask is consistent with number of vCPUs. Can skip this
2773 # test if only 1 entry in the CPU mask, which means same mask
2774 # is applied to all vCPUs.
2775 if (len(cpu_list) > 1 and
2776 len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
2777 raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
2778 " CPU mask [%s]" %
2779 (self.be_proposed[constants.BE_VCPUS],
2780 self.hv_proposed[constants.HV_CPU_MASK]),
2781 errors.ECODE_INVAL)
2782
2783 # Only perform this test if a new CPU mask is given
2784 if constants.HV_CPU_MASK in self.hv_new:
2785 # Calculate the largest CPU number requested
2786 max_requested_cpu = max(map(max, cpu_list))
2787 # Check that all of the instance's nodes have enough physical CPUs to
2788 # satisfy the requested CPU mask
2789 _CheckNodesPhysicalCPUs(self, instance.all_nodes,
2790 max_requested_cpu + 1, instance.hypervisor)
2791
2792 # osparams processing
2793 if self.op.osparams:
2794 i_osdict = GetUpdatedParams(instance.osparams, self.op.osparams)
2795 CheckOSParams(self, True, nodelist, instance_os, i_osdict)
2796 self.os_inst = i_osdict # the new dict (without defaults)
2797 else:
2798 self.os_inst = {}
2799
2800 #TODO(dynmem): do the appropriate check involving MINMEM
2801 if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
2802 be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
2803 mem_check_list = [pnode]
2804 if be_new[constants.BE_AUTO_BALANCE]:
2805 # either we changed auto_balance to yes or it was from before
2806 mem_check_list.extend(instance.secondary_nodes)
2807 instance_info = self.rpc.call_instance_info(pnode, instance.name,
2808 instance.hypervisor)
2809 nodeinfo = self.rpc.call_node_info(mem_check_list, None,
2810 [instance.hypervisor], False)
2811 pninfo = nodeinfo[pnode]
2812 msg = pninfo.fail_msg
2813 if msg:
2814 # Assume the primary node is unreachable and go ahead
2815 self.warn.append("Can't get info from primary node %s: %s" %
2816 (pnode, msg))
2817 else:
2818 (_, _, (pnhvinfo, )) = pninfo.payload
2819 if not isinstance(pnhvinfo.get("memory_free", None), int):
2820 self.warn.append("Node data from primary node %s doesn't contain"
2821 " free memory information" % pnode)
2822 elif instance_info.fail_msg:
2823 self.warn.append("Can't get instance runtime information: %s" %
2824 instance_info.fail_msg)
2825 else:
2826 if instance_info.payload:
2827 current_mem = int(instance_info.payload["memory"])
2828 else:
2829 # Assume instance not running
2830 # (there is a slight race condition here, but it's not very
2831 # probable, and we have no other way to check)
2832 # TODO: Describe race condition
2833 current_mem = 0
2834 #TODO(dynmem): do the appropriate check involving MINMEM
2835 miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
2836 pnhvinfo["memory_free"])
2837 if miss_mem > 0:
2838 raise errors.OpPrereqError("This change will prevent the instance"
2839 " from starting, due to %d MB of memory"
2840 " missing on its primary node" %
2841 miss_mem, errors.ECODE_NORES)
2842
2843 if be_new[constants.BE_AUTO_BALANCE]:
2844 for node, nres in nodeinfo.items():
2845 if node not in instance.secondary_nodes:
2846 continue
2847 nres.Raise("Can't get info from secondary node %s" % node,
2848 prereq=True, ecode=errors.ECODE_STATE)
2849 (_, _, (nhvinfo, )) = nres.payload
2850 if not isinstance(nhvinfo.get("memory_free", None), int):
2851 raise errors.OpPrereqError("Secondary node %s didn't return free"
2852 " memory information" % node,
2853 errors.ECODE_STATE)
2854 #TODO(dynmem): do the appropriate check involving MINMEM
2855 elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
2856 raise errors.OpPrereqError("This change will prevent the instance"
2857 " from failover to its secondary node"
2858 " %s, due to not enough memory" % node,
2859 errors.ECODE_STATE)
2860
2861 if self.op.runtime_mem:
2862 remote_info = self.rpc.call_instance_info(instance.primary_node,
2863 instance.name,
2864 instance.hypervisor)
2865 remote_info.Raise("Error checking node %s" % instance.primary_node)
2866 if not remote_info.payload: # not running already
2867 raise errors.OpPrereqError("Instance %s is not running" %
2868 instance.name, errors.ECODE_STATE)
2869
2870 current_memory = remote_info.payload["memory"]
2871 if (not self.op.force and
2872 (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
2873 self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
2874 raise errors.OpPrereqError("Instance %s must have memory between %d"
2875 " and %d MB of memory unless --force is"
2876 " given" %
2877 (instance.name,
2878 self.be_proposed[constants.BE_MINMEM],
2879 self.be_proposed[constants.BE_MAXMEM]),
2880 errors.ECODE_INVAL)
2881
2882 delta = self.op.runtime_mem - current_memory
2883 if delta > 0:
2884 CheckNodeFreeMemory(self, instance.primary_node,
2885 "ballooning memory for instance %s" %
2886 instance.name, delta, instance.hypervisor)
2887
2888 if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
2889 raise errors.OpPrereqError("Disk operations not supported for"
2890 " diskless instances", errors.ECODE_INVAL)
2891
2892 def _PrepareNicCreate(_, params, private):
2893 self._PrepareNicModification(params, private, None, None,
2894 {}, cluster, pnode)
2895 return (None, None)
2896
2897 def _PrepareNicMod(_, nic, params, private):
2898 self._PrepareNicModification(params, private, nic.ip, nic.network,
2899 nic.nicparams, cluster, pnode)
2900 return None
2901
2902 def _PrepareNicRemove(_, params, __):
2903 ip = params.ip
2904 net = params.network
2905 if net is not None and ip is not None:
2906 self.cfg.ReleaseIp(net, ip, self.proc.GetECId())
2907
2908 # Verify NIC changes (operating on copy)
2909 nics = instance.nics[:]
2910 _ApplyContainerMods("NIC", nics, None, self.nicmod,
2911 _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
2912 if len(nics) > constants.MAX_NICS:
2913 raise errors.OpPrereqError("Instance has too many network interfaces"
2914 " (%d), cannot add more" % constants.MAX_NICS,
2915 errors.ECODE_STATE)
2916
2917 def _PrepareDiskMod(_, disk, params, __):
2918 disk.name = params.get(constants.IDISK_NAME, None)
2919
2920 # Verify disk changes (operating on a copy)
2921 disks = copy.deepcopy(instance.disks)
2922 _ApplyContainerMods("disk", disks, None, self.diskmod, None,
2923 _PrepareDiskMod, None)
2924 utils.ValidateDeviceNames("disk", disks)
2925 if len(disks) > constants.MAX_DISKS:
2926 raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
2927 " more" % constants.MAX_DISKS,
2928 errors.ECODE_STATE)
2929 disk_sizes = [disk.size for disk in instance.disks]
2930 disk_sizes.extend(params["size"] for (op, idx, params, private) in
2931 self.diskmod if op == constants.DDM_ADD)
2932 ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
2933 ispec[constants.ISPEC_DISK_SIZE] = disk_sizes
2934
2935 if self.op.offline is not None and self.op.offline:
2936 CheckInstanceState(self, instance, CAN_CHANGE_INSTANCE_OFFLINE,
2937 msg="can't change to offline")
2938
2939 # Pre-compute NIC changes (necessary to use result in hooks)
2940 self._nic_chgdesc = []
2941 if self.nicmod:
2942 # Operate on copies as this is still in prereq
2943 nics = [nic.Copy() for nic in instance.nics]
2944 _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
2945 self._CreateNewNic, self._ApplyNicMods, None)
2946 # Verify that NIC names are unique and valid
2947 utils.ValidateDeviceNames("NIC", nics)
2948 self._new_nics = nics
2949 ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
2950 else:
2951 self._new_nics = None
2952 ispec[constants.ISPEC_NIC_COUNT] = len(instance.nics)
2953
2954 if not self.op.ignore_ipolicy:
2955 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
2956 group_info)
2957
2958 # Fill ispec with backend parameters
2959 ispec[constants.ISPEC_SPINDLE_USE] = \
2960 self.be_new.get(constants.BE_SPINDLE_USE, None)
2961 ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
2962 None)
2963
2964 # Copy ispec to verify parameters with min/max values separately
2965 if self.op.disk_template:
2966 new_disk_template = self.op.disk_template
2967 else:
2968 new_disk_template = instance.disk_template
2969 ispec_max = ispec.copy()
2970 ispec_max[constants.ISPEC_MEM_SIZE] = \
2971 self.be_new.get(constants.BE_MAXMEM, None)
2972 res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
2973 new_disk_template)
2974 ispec_min = ispec.copy()
2975 ispec_min[constants.ISPEC_MEM_SIZE] = \
2976 self.be_new.get(constants.BE_MINMEM, None)
2977 res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
2978 new_disk_template)
2979
2980 if (res_max or res_min):
2981 # FIXME: Improve error message by including information about whether
2982 # the upper or lower limit of the parameter fails the ipolicy.
2983 msg = ("Instance allocation to group %s (%s) violates policy: %s" %
2984 (group_info, group_info.name,
2985 utils.CommaJoin(set(res_max + res_min))))
2986 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
2987
2988 def _ConvertPlainToDrbd(self, feedback_fn):
2989 """Converts an instance from plain to drbd.
2990
2991 """
2992 feedback_fn("Converting template to drbd")
2993 instance = self.instance
2994 pnode = instance.primary_node
2995 snode = self.op.remote_node
2996
2997 assert instance.disk_template == constants.DT_PLAIN
2998