[ganeti-github.git] / lib / cmdlib / instance.py
1 #
2 #
3
4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
5 #
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
10 #
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
15 #
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 # 02110-1301, USA.
20
21
22 """Logical units dealing with instances."""
23
24 import OpenSSL
25 import copy
26 import logging
27 import os
28
29 from ganeti import compat
30 from ganeti import constants
31 from ganeti import errors
32 from ganeti import ht
33 from ganeti import hypervisor
34 from ganeti import locking
35 from ganeti.masterd import iallocator
36 from ganeti import masterd
37 from ganeti import netutils
38 from ganeti import objects
39 from ganeti import opcodes
40 from ganeti import pathutils
41 from ganeti import rpc
42 from ganeti import utils
43
44 from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs
45
46 from ganeti.cmdlib.common import INSTANCE_DOWN, \
47 INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
48 ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
49 LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
50 IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
51 AnnotateDiskParams, GetUpdatedParams, ExpandInstanceName, \
52 ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeName
53 from ganeti.cmdlib.instance_storage import CreateDisks, \
54 CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
55 IsExclusiveStorageEnabledNodeName, CreateSingleBlockDev, ComputeDisks, \
56 CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
57 StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks
58 from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
59 GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
60 NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
61 ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
62 GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
63 CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS
64
65 import ganeti.masterd.instance
66
67
68 #: Type description for changes as returned by L{_ApplyContainerMods}'s
69 #: callbacks
70 _TApplyContModsCbChanges = \
71 ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
72 ht.TNonEmptyString,
73 ht.TAny,
74 ])))
75
76
77 def _CheckHostnameSane(lu, name):
78 """Ensures that a given hostname resolves to a 'sane' name.
79
80 The given name is required to be a prefix of the resolved hostname,
81 to prevent accidental mismatches.
82
83 @param lu: the logical unit on behalf of which we're checking
84 @param name: the name we should resolve and check
85 @return: the resolved hostname object
86
87 """
88 hostname = netutils.GetHostname(name=name)
89 if hostname.name != name:
90 lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
91 if not utils.MatchNameComponent(name, [hostname.name]):
92 raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
93 " same as given hostname '%s'") %
94 (hostname.name, name), errors.ECODE_INVAL)
95 return hostname
96
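# Illustrative example (hypothetical names, not from this module): assuming
# the resolver maps "inst1" to "inst1.example.com", something like
#
#   hostname = _CheckHostnameSane(lu, "inst1")
#   assert hostname.name == "inst1.example.com"
#
# would log the resolved name and succeed, because "inst1" is a prefix
# component of the resolved hostname. If "inst1" instead resolved to
# "otherhost.example.com", the utils.MatchNameComponent check above would
# fail and OpPrereqError would be raised to prevent an accidental mismatch.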
97
98 def _CheckOpportunisticLocking(op):
99 """Generate error if opportunistic locking is not possible.
100
101 """
102 if op.opportunistic_locking and not op.iallocator:
103 raise errors.OpPrereqError("Opportunistic locking is only available in"
104 " combination with an instance allocator",
105 errors.ECODE_INVAL)
106
107
108 def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_whitelist):
109 """Wrapper around IAReqInstanceAlloc.
110
111 @param op: The instance opcode
112 @param disks: The computed disks
113 @param nics: The computed nics
114   @param beparams: The fully filled beparams
115 @param node_whitelist: List of nodes which should appear as online to the
116 allocator (unless the node is already marked offline)
117
118 @returns: A filled L{iallocator.IAReqInstanceAlloc}
119
120 """
121 spindle_use = beparams[constants.BE_SPINDLE_USE]
122 return iallocator.IAReqInstanceAlloc(name=op.instance_name,
123 disk_template=op.disk_template,
124 tags=op.tags,
125 os=op.os_type,
126 vcpus=beparams[constants.BE_VCPUS],
127 memory=beparams[constants.BE_MAXMEM],
128 spindle_use=spindle_use,
129 disks=disks,
130 nics=[n.ToDict() for n in nics],
131 hypervisor=op.hypervisor,
132 node_whitelist=node_whitelist)
133
134
135 def _ComputeFullBeParams(op, cluster):
136 """Computes the full beparams.
137
138 @param op: The instance opcode
139 @param cluster: The cluster config object
140
141 @return: The fully filled beparams
142
143 """
144 default_beparams = cluster.beparams[constants.PP_DEFAULT]
145 for param, value in op.beparams.iteritems():
146 if value == constants.VALUE_AUTO:
147 op.beparams[param] = default_beparams[param]
148 objects.UpgradeBeParams(op.beparams)
149 utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
150 return cluster.SimpleFillBE(op.beparams)
151
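# Rough example (illustrative values only): assuming cluster defaults of
# {BE_VCPUS: 1, BE_MAXMEM: 512, BE_MINMEM: 512}, an opcode passing
# beparams={BE_VCPUS: constants.VALUE_AUTO, BE_MAXMEM: 1024} first has
# BE_VCPUS rewritten to the default value 1, is then upgraded and
# type-checked, and is finally filled with the remaining defaults,
# yielding roughly {BE_VCPUS: 1, BE_MAXMEM: 1024, BE_MINMEM: 512, ...}.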
152
153 def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
154 """Computes the nics.
155
156 @param op: The instance opcode
157 @param cluster: Cluster configuration object
158 @param default_ip: The default ip to assign
159 @param cfg: An instance of the configuration object
160 @param ec_id: Execution context ID
161
162   @returns: The built up nics
163
164 """
165 nics = []
166 for nic in op.nics:
167 nic_mode_req = nic.get(constants.INIC_MODE, None)
168 nic_mode = nic_mode_req
169 if nic_mode is None or nic_mode == constants.VALUE_AUTO:
170 nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
171
172 net = nic.get(constants.INIC_NETWORK, None)
173 link = nic.get(constants.NIC_LINK, None)
174 ip = nic.get(constants.INIC_IP, None)
175
176 if net is None or net.lower() == constants.VALUE_NONE:
177 net = None
178 else:
179 if nic_mode_req is not None or link is not None:
180 raise errors.OpPrereqError("If network is given, no mode or link"
181 " is allowed to be passed",
182 errors.ECODE_INVAL)
183
184 # ip validity checks
185 if ip is None or ip.lower() == constants.VALUE_NONE:
186 nic_ip = None
187 elif ip.lower() == constants.VALUE_AUTO:
188 if not op.name_check:
189 raise errors.OpPrereqError("IP address set to auto but name checks"
190 " have been skipped",
191 errors.ECODE_INVAL)
192 nic_ip = default_ip
193 else:
194 # We defer pool operations until later, so that the iallocator has
195       # filled in the instance's node(s)
196 if ip.lower() == constants.NIC_IP_POOL:
197 if net is None:
198 raise errors.OpPrereqError("if ip=pool, parameter network"
199 " must be passed too",
200 errors.ECODE_INVAL)
201
202 elif not netutils.IPAddress.IsValid(ip):
203 raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
204 errors.ECODE_INVAL)
205
206 nic_ip = ip
207
208 # TODO: check the ip address for uniqueness
209 if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
210 raise errors.OpPrereqError("Routed nic mode requires an ip address",
211 errors.ECODE_INVAL)
212
213 # MAC address verification
214 mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
215 if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
216 mac = utils.NormalizeAndValidateMac(mac)
217
218 try:
219 # TODO: We need to factor this out
220 cfg.ReserveMAC(mac, ec_id)
221 except errors.ReservationError:
222 raise errors.OpPrereqError("MAC address %s already in use"
223 " in cluster" % mac,
224 errors.ECODE_NOTUNIQUE)
225
226 # Build nic parameters
227 nicparams = {}
228 if nic_mode_req:
229 nicparams[constants.NIC_MODE] = nic_mode
230 if link:
231 nicparams[constants.NIC_LINK] = link
232
233 check_params = cluster.SimpleFillNIC(nicparams)
234 objects.NIC.CheckParameterSyntax(check_params)
235 net_uuid = cfg.LookupNetwork(net)
236 name = nic.get(constants.INIC_NAME, None)
237 if name is not None and name.lower() == constants.VALUE_NONE:
238 name = None
239 nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
240 network=net_uuid, nicparams=nicparams)
241 nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
242 nics.append(nic_obj)
243
244 return nics
245
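# Sketch of the per-NIC IP handling above (values are illustrative only):
#   ip missing or "none"  -> nic_ip = None (no address assigned)
#   ip = "auto"           -> nic_ip = default_ip (only valid with name_check)
#   ip = "pool"           -> kept literally as "pool"; a network must be
#                            given and the real address is reserved later,
#                            once the iallocator has chosen the node
#   ip = "192.0.2.10"     -> validated and used as given
# e.g. op.nics = [{"ip": "pool", "network": "net1"}] defers the allocation,
# while [{"ip": "auto"}] reuses the IP resolved from the instance name.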
246
247 def _CheckForConflictingIp(lu, ip, node):
248 """In case of conflicting IP address raise error.
249
250 @type ip: string
251 @param ip: IP address
252 @type node: string
253 @param node: node name
254
255 """
256 (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node)
257 if conf_net is not None:
258 raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
259 " network %s, but the target NIC does not." %
260 (ip, conf_net)),
261 errors.ECODE_STATE)
262
263 return (None, None)
264
265
266 def _ComputeIPolicyInstanceSpecViolation(
267 ipolicy, instance_spec, disk_template,
268 _compute_fn=ComputeIPolicySpecViolation):
269 """Compute if instance specs meets the specs of ipolicy.
270
271 @type ipolicy: dict
272 @param ipolicy: The ipolicy to verify against
273   @type instance_spec: dict
274 @param instance_spec: The instance spec to verify
275 @type disk_template: string
276 @param disk_template: the disk template of the instance
277 @param _compute_fn: The function to verify ipolicy (unittest only)
278 @see: L{ComputeIPolicySpecViolation}
279
280 """
281 mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
282 cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
283 disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
284 disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
285 nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
286 spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)
287
288 return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
289 disk_sizes, spindle_use, disk_template)
290
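# Illustrative example (values assumed for the sketch): an instance_spec of
#   {ISPEC_MEM_SIZE: 2048, ISPEC_CPU_COUNT: 2, ISPEC_DISK_COUNT: 1,
#    ISPEC_DISK_SIZE: [10240], ISPEC_NIC_COUNT: 1, ISPEC_SPINDLE_USE: 1}
# is unpacked into the individual arguments of ComputeIPolicySpecViolation;
# an empty result means the spec fits the ipolicy for that disk template,
# otherwise the returned violation messages are reported to the caller.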
291
292 def _CheckOSVariant(os_obj, name):
293 """Check whether an OS name conforms to the os variants specification.
294
295 @type os_obj: L{objects.OS}
296 @param os_obj: OS object to check
297 @type name: string
298 @param name: OS name passed by the user, to check for validity
299
300 """
301 variant = objects.OS.GetVariant(name)
302 if not os_obj.supported_variants:
303 if variant:
304 raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
305 " passed)" % (os_obj.name, variant),
306 errors.ECODE_INVAL)
307 return
308 if not variant:
309 raise errors.OpPrereqError("OS name must include a variant",
310 errors.ECODE_INVAL)
311
312 if variant not in os_obj.supported_variants:
313 raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
314
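# Illustrative example (hypothetical OS definition): for an OS object whose
# supported_variants is ["default", "testing"], a name such as
# "debootstrap+default" passes, "debootstrap+foo" raises "Unsupported OS
# variant", and plain "debootstrap" raises "OS name must include a variant".
# If the OS declares no variants at all, any "+variant" suffix is rejected
# instead.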
315
316 class LUInstanceCreate(LogicalUnit):
317 """Create an instance.
318
319 """
320 HPATH = "instance-add"
321 HTYPE = constants.HTYPE_INSTANCE
322 REQ_BGL = False
323
324 def CheckArguments(self):
325 """Check arguments.
326
327 """
328 # do not require name_check to ease forward/backward compatibility
329 # for tools
330 if self.op.no_install and self.op.start:
331 self.LogInfo("No-installation mode selected, disabling startup")
332 self.op.start = False
333 # validate/normalize the instance name
334 self.op.instance_name = \
335 netutils.Hostname.GetNormalizedName(self.op.instance_name)
336
337 if self.op.ip_check and not self.op.name_check:
338 # TODO: make the ip check more flexible and not depend on the name check
339 raise errors.OpPrereqError("Cannot do IP address check without a name"
340 " check", errors.ECODE_INVAL)
341
342 # check nics' parameter names
343 for nic in self.op.nics:
344 utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
345       # check that NIC names are unique and valid
346 utils.ValidateDeviceNames("NIC", self.op.nics)
347
348 # check that disk's names are unique and valid
349 utils.ValidateDeviceNames("disk", self.op.disks)
350
351 cluster = self.cfg.GetClusterInfo()
352 if not self.op.disk_template in cluster.enabled_disk_templates:
353 raise errors.OpPrereqError("Cannot create an instance with disk template"
354 " '%s', because it is not enabled in the"
355 " cluster. Enabled disk templates are: %s." %
356 (self.op.disk_template,
357 ",".join(cluster.enabled_disk_templates)))
358
359     # check disks: parameter names and consistent adopt/no-adopt strategy
360 has_adopt = has_no_adopt = False
361 for disk in self.op.disks:
362 if self.op.disk_template != constants.DT_EXT:
363 utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
364 if constants.IDISK_ADOPT in disk:
365 has_adopt = True
366 else:
367 has_no_adopt = True
368 if has_adopt and has_no_adopt:
369 raise errors.OpPrereqError("Either all disks are adopted or none is",
370 errors.ECODE_INVAL)
371 if has_adopt:
372 if self.op.disk_template not in constants.DTS_MAY_ADOPT:
373 raise errors.OpPrereqError("Disk adoption is not supported for the"
374 " '%s' disk template" %
375 self.op.disk_template,
376 errors.ECODE_INVAL)
377 if self.op.iallocator is not None:
378 raise errors.OpPrereqError("Disk adoption not allowed with an"
379 " iallocator script", errors.ECODE_INVAL)
380 if self.op.mode == constants.INSTANCE_IMPORT:
381 raise errors.OpPrereqError("Disk adoption not allowed for"
382 " instance import", errors.ECODE_INVAL)
383 else:
384 if self.op.disk_template in constants.DTS_MUST_ADOPT:
385 raise errors.OpPrereqError("Disk template %s requires disk adoption,"
386 " but no 'adopt' parameter given" %
387 self.op.disk_template,
388 errors.ECODE_INVAL)
389
390 self.adopt_disks = has_adopt
391
392 # instance name verification
393 if self.op.name_check:
394 self.hostname1 = _CheckHostnameSane(self, self.op.instance_name)
395 self.op.instance_name = self.hostname1.name
396 # used in CheckPrereq for ip ping check
397 self.check_ip = self.hostname1.ip
398 else:
399 self.check_ip = None
400
401 # file storage checks
402 if (self.op.file_driver and
403 not self.op.file_driver in constants.FILE_DRIVER):
404 raise errors.OpPrereqError("Invalid file driver name '%s'" %
405 self.op.file_driver, errors.ECODE_INVAL)
406
407 if self.op.disk_template == constants.DT_FILE:
408 opcodes.RequireFileStorage()
409 elif self.op.disk_template == constants.DT_SHARED_FILE:
410 opcodes.RequireSharedFileStorage()
411
412 ### Node/iallocator related checks
413 CheckIAllocatorOrNode(self, "iallocator", "pnode")
414
415 if self.op.pnode is not None:
416 if self.op.disk_template in constants.DTS_INT_MIRROR:
417 if self.op.snode is None:
418 raise errors.OpPrereqError("The networked disk templates need"
419 " a mirror node", errors.ECODE_INVAL)
420 elif self.op.snode:
421 self.LogWarning("Secondary node will be ignored on non-mirrored disk"
422 " template")
423 self.op.snode = None
424
425 _CheckOpportunisticLocking(self.op)
426
427 self._cds = GetClusterDomainSecret()
428
429 if self.op.mode == constants.INSTANCE_IMPORT:
430 # On import force_variant must be True, because if we forced it at
431 # initial install, our only chance when importing it back is that it
432 # works again!
433 self.op.force_variant = True
434
435 if self.op.no_install:
436 self.LogInfo("No-installation mode has no effect during import")
437
438 elif self.op.mode == constants.INSTANCE_CREATE:
439 if self.op.os_type is None:
440 raise errors.OpPrereqError("No guest OS specified",
441 errors.ECODE_INVAL)
442 if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
443 raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
444 " installation" % self.op.os_type,
445 errors.ECODE_STATE)
446 if self.op.disk_template is None:
447 raise errors.OpPrereqError("No disk template specified",
448 errors.ECODE_INVAL)
449
450 elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
451 # Check handshake to ensure both clusters have the same domain secret
452 src_handshake = self.op.source_handshake
453 if not src_handshake:
454 raise errors.OpPrereqError("Missing source handshake",
455 errors.ECODE_INVAL)
456
457 errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
458 src_handshake)
459 if errmsg:
460 raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
461 errors.ECODE_INVAL)
462
463 # Load and check source CA
464 self.source_x509_ca_pem = self.op.source_x509_ca
465 if not self.source_x509_ca_pem:
466 raise errors.OpPrereqError("Missing source X509 CA",
467 errors.ECODE_INVAL)
468
469 try:
470 (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
471 self._cds)
472 except OpenSSL.crypto.Error, err:
473 raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
474 (err, ), errors.ECODE_INVAL)
475
476 (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
477 if errcode is not None:
478 raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
479 errors.ECODE_INVAL)
480
481 self.source_x509_ca = cert
482
483 src_instance_name = self.op.source_instance_name
484 if not src_instance_name:
485 raise errors.OpPrereqError("Missing source instance name",
486 errors.ECODE_INVAL)
487
488 self.source_instance_name = \
489 netutils.GetHostname(name=src_instance_name).name
490
491 else:
492 raise errors.OpPrereqError("Invalid instance creation mode %r" %
493 self.op.mode, errors.ECODE_INVAL)
494
495 def ExpandNames(self):
496 """ExpandNames for CreateInstance.
497
498 Figure out the right locks for instance creation.
499
500 """
501 self.needed_locks = {}
502
503 instance_name = self.op.instance_name
504 # this is just a preventive check, but someone might still add this
505 # instance in the meantime, and creation will fail at lock-add time
506 if instance_name in self.cfg.GetInstanceList():
507 raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
508 instance_name, errors.ECODE_EXISTS)
509
510 self.add_locks[locking.LEVEL_INSTANCE] = instance_name
511
512 if self.op.iallocator:
513 # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
514 # specifying a group on instance creation and then selecting nodes from
515 # that group
516 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
517 self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
518
519 if self.op.opportunistic_locking:
520 self.opportunistic_locks[locking.LEVEL_NODE] = True
521 self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
522 else:
523 self.op.pnode = ExpandNodeName(self.cfg, self.op.pnode)
524 nodelist = [self.op.pnode]
525 if self.op.snode is not None:
526 self.op.snode = ExpandNodeName(self.cfg, self.op.snode)
527 nodelist.append(self.op.snode)
528 self.needed_locks[locking.LEVEL_NODE] = nodelist
529
530 # in case of import lock the source node too
531 if self.op.mode == constants.INSTANCE_IMPORT:
532 src_node = self.op.src_node
533 src_path = self.op.src_path
534
535 if src_path is None:
536 self.op.src_path = src_path = self.op.instance_name
537
538 if src_node is None:
539 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
540 self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
541 self.op.src_node = None
542 if os.path.isabs(src_path):
543 raise errors.OpPrereqError("Importing an instance from a path"
544 " requires a source node option",
545 errors.ECODE_INVAL)
546 else:
547 self.op.src_node = src_node = ExpandNodeName(self.cfg, src_node)
548 if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
549 self.needed_locks[locking.LEVEL_NODE].append(src_node)
550 if not os.path.isabs(src_path):
551 self.op.src_path = src_path = \
552 utils.PathJoin(pathutils.EXPORT_DIR, src_path)
553
554 self.needed_locks[locking.LEVEL_NODE_RES] = \
555 CopyLockList(self.needed_locks[locking.LEVEL_NODE])
556
557 def _RunAllocator(self):
558 """Run the allocator based on input opcode.
559
560 """
561 if self.op.opportunistic_locking:
562 # Only consider nodes for which a lock is held
563 node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
564 else:
565 node_whitelist = None
566
567 #TODO Export network to iallocator so that it chooses a pnode
568 # in a nodegroup that has the desired network connected to
569 req = _CreateInstanceAllocRequest(self.op, self.disks,
570 self.nics, self.be_full,
571 node_whitelist)
572 ial = iallocator.IAllocator(self.cfg, self.rpc, req)
573
574 ial.Run(self.op.iallocator)
575
576 if not ial.success:
577 # When opportunistic locks are used only a temporary failure is generated
578 if self.op.opportunistic_locking:
579 ecode = errors.ECODE_TEMP_NORES
580 else:
581 ecode = errors.ECODE_NORES
582
583 raise errors.OpPrereqError("Can't compute nodes using"
584 " iallocator '%s': %s" %
585 (self.op.iallocator, ial.info),
586 ecode)
587
588 self.op.pnode = ial.result[0]
589 self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
590 self.op.instance_name, self.op.iallocator,
591 utils.CommaJoin(ial.result))
592
593 assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"
594
595 if req.RequiredNodes() == 2:
596 self.op.snode = ial.result[1]
597
598 def BuildHooksEnv(self):
599 """Build hooks env.
600
601 This runs on master, primary and secondary nodes of the instance.
602
603 """
604 env = {
605 "ADD_MODE": self.op.mode,
606 }
607 if self.op.mode == constants.INSTANCE_IMPORT:
608 env["SRC_NODE"] = self.op.src_node
609 env["SRC_PATH"] = self.op.src_path
610 env["SRC_IMAGES"] = self.src_images
611
612 env.update(BuildInstanceHookEnv(
613 name=self.op.instance_name,
614 primary_node=self.op.pnode,
615 secondary_nodes=self.secondaries,
616 status=self.op.start,
617 os_type=self.op.os_type,
618 minmem=self.be_full[constants.BE_MINMEM],
619 maxmem=self.be_full[constants.BE_MAXMEM],
620 vcpus=self.be_full[constants.BE_VCPUS],
621 nics=NICListToTuple(self, self.nics),
622 disk_template=self.op.disk_template,
623 disks=[(d[constants.IDISK_NAME], d[constants.IDISK_SIZE],
624 d[constants.IDISK_MODE]) for d in self.disks],
625 bep=self.be_full,
626 hvp=self.hv_full,
627 hypervisor_name=self.op.hypervisor,
628 tags=self.op.tags,
629 ))
630
631 return env
632
633 def BuildHooksNodes(self):
634 """Build hooks nodes.
635
636 """
637 nl = [self.cfg.GetMasterNode(), self.op.pnode] + self.secondaries
638 return nl, nl
639
640 def _ReadExportInfo(self):
641 """Reads the export information from disk.
642
643 It will override the opcode source node and path with the actual
644 information, if these two were not specified before.
645
646 @return: the export information
647
648 """
649 assert self.op.mode == constants.INSTANCE_IMPORT
650
651 src_node = self.op.src_node
652 src_path = self.op.src_path
653
654 if src_node is None:
655 locked_nodes = self.owned_locks(locking.LEVEL_NODE)
656 exp_list = self.rpc.call_export_list(locked_nodes)
657 found = False
658 for node in exp_list:
659 if exp_list[node].fail_msg:
660 continue
661 if src_path in exp_list[node].payload:
662 found = True
663 self.op.src_node = src_node = node
664 self.op.src_path = src_path = utils.PathJoin(pathutils.EXPORT_DIR,
665 src_path)
666 break
667 if not found:
668 raise errors.OpPrereqError("No export found for relative path %s" %
669 src_path, errors.ECODE_INVAL)
670
671 CheckNodeOnline(self, src_node)
672 result = self.rpc.call_export_info(src_node, src_path)
673 result.Raise("No export or invalid export found in dir %s" % src_path)
674
675 export_info = objects.SerializableConfigParser.Loads(str(result.payload))
676 if not export_info.has_section(constants.INISECT_EXP):
677 raise errors.ProgrammerError("Corrupted export config",
678 errors.ECODE_ENVIRON)
679
680 ei_version = export_info.get(constants.INISECT_EXP, "version")
681 if (int(ei_version) != constants.EXPORT_VERSION):
682 raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
683 (ei_version, constants.EXPORT_VERSION),
684 errors.ECODE_ENVIRON)
685 return export_info
686
687 def _ReadExportParams(self, einfo):
688 """Use export parameters as defaults.
689
690     If the opcode doesn't specify (i.e. override) some instance
691     parameters, try to use them from the export information, if
692     it declares them.
693
694 """
695 self.op.os_type = einfo.get(constants.INISECT_EXP, "os")
696
697 if self.op.disk_template is None:
698 if einfo.has_option(constants.INISECT_INS, "disk_template"):
699 self.op.disk_template = einfo.get(constants.INISECT_INS,
700 "disk_template")
701 if self.op.disk_template not in constants.DISK_TEMPLATES:
702 raise errors.OpPrereqError("Disk template specified in configuration"
703 " file is not one of the allowed values:"
704 " %s" %
705 " ".join(constants.DISK_TEMPLATES),
706 errors.ECODE_INVAL)
707 else:
708 raise errors.OpPrereqError("No disk template specified and the export"
709 " is missing the disk_template information",
710 errors.ECODE_INVAL)
711
712 if not self.op.disks:
713 disks = []
714 # TODO: import the disk iv_name too
715 for idx in range(constants.MAX_DISKS):
716 if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
717 disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
718 disks.append({constants.IDISK_SIZE: disk_sz})
719 self.op.disks = disks
720 if not disks and self.op.disk_template != constants.DT_DISKLESS:
721 raise errors.OpPrereqError("No disk info specified and the export"
722 " is missing the disk information",
723 errors.ECODE_INVAL)
724
725 if not self.op.nics:
726 nics = []
727 for idx in range(constants.MAX_NICS):
728 if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
729 ndict = {}
730 for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
731 v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
732 ndict[name] = v
733 nics.append(ndict)
734 else:
735 break
736 self.op.nics = nics
737
738 if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
739 self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()
740
741 if (self.op.hypervisor is None and
742 einfo.has_option(constants.INISECT_INS, "hypervisor")):
743 self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")
744
745 if einfo.has_section(constants.INISECT_HYP):
746 # use the export parameters but do not override the ones
747 # specified by the user
748 for name, value in einfo.items(constants.INISECT_HYP):
749 if name not in self.op.hvparams:
750 self.op.hvparams[name] = value
751
752 if einfo.has_section(constants.INISECT_BEP):
753 # use the parameters, without overriding
754 for name, value in einfo.items(constants.INISECT_BEP):
755 if name not in self.op.beparams:
756 self.op.beparams[name] = value
757 # Compatibility for the old "memory" be param
758 if name == constants.BE_MEMORY:
759 if constants.BE_MAXMEM not in self.op.beparams:
760 self.op.beparams[constants.BE_MAXMEM] = value
761 if constants.BE_MINMEM not in self.op.beparams:
762 self.op.beparams[constants.BE_MINMEM] = value
763 else:
764 # try to read the parameters old style, from the main section
765 for name in constants.BES_PARAMETERS:
766 if (name not in self.op.beparams and
767 einfo.has_option(constants.INISECT_INS, name)):
768 self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)
769
770 if einfo.has_section(constants.INISECT_OSP):
771 # use the parameters, without overriding
772 for name, value in einfo.items(constants.INISECT_OSP):
773 if name not in self.op.osparams:
774 self.op.osparams[name] = value
775
776 def _RevertToDefaults(self, cluster):
777 """Revert the instance parameters to the default values.
778
779 """
780 # hvparams
781 hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
782 for name in self.op.hvparams.keys():
783 if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
784 del self.op.hvparams[name]
785 # beparams
786 be_defs = cluster.SimpleFillBE({})
787 for name in self.op.beparams.keys():
788 if name in be_defs and be_defs[name] == self.op.beparams[name]:
789 del self.op.beparams[name]
790 # nic params
791 nic_defs = cluster.SimpleFillNIC({})
792 for nic in self.op.nics:
793 for name in constants.NICS_PARAMETERS:
794 if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
795 del nic[name]
796 # osparams
797 os_defs = cluster.SimpleFillOS(self.op.os_type, {})
798 for name in self.op.osparams.keys():
799 if name in os_defs and os_defs[name] == self.op.osparams[name]:
800 del self.op.osparams[name]
801
802 def _CalculateFileStorageDir(self):
803 """Calculate final instance file storage dir.
804
805 """
806 # file storage dir calculation/check
807 self.instance_file_storage_dir = None
808 if self.op.disk_template in constants.DTS_FILEBASED:
809 # build the full file storage dir path
810 joinargs = []
811
812 if self.op.disk_template == constants.DT_SHARED_FILE:
813 get_fsd_fn = self.cfg.GetSharedFileStorageDir
814 else:
815 get_fsd_fn = self.cfg.GetFileStorageDir
816
817 cfg_storagedir = get_fsd_fn()
818 if not cfg_storagedir:
819 raise errors.OpPrereqError("Cluster file storage dir not defined",
820 errors.ECODE_STATE)
821 joinargs.append(cfg_storagedir)
822
823 if self.op.file_storage_dir is not None:
824 joinargs.append(self.op.file_storage_dir)
825
826 joinargs.append(self.op.instance_name)
827
828 # pylint: disable=W0142
829 self.instance_file_storage_dir = utils.PathJoin(*joinargs)
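# Illustrative example (hypothetical paths): with a cluster file storage dir
# of "/srv/ganeti/file-storage", self.op.file_storage_dir set to "mysubdir"
# and an instance named "inst1.example.com", the resulting
# instance_file_storage_dir would be
# "/srv/ganeti/file-storage/mysubdir/inst1.example.com"; if no
# file_storage_dir was given, the middle component is simply omitted.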
830
831 def CheckPrereq(self): # pylint: disable=R0914
832 """Check prerequisites.
833
834 """
835 self._CalculateFileStorageDir()
836
837 if self.op.mode == constants.INSTANCE_IMPORT:
838 export_info = self._ReadExportInfo()
839 self._ReadExportParams(export_info)
840 self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
841 else:
842 self._old_instance_name = None
843
844 if (not self.cfg.GetVGName() and
845 self.op.disk_template not in constants.DTS_NOT_LVM):
846 raise errors.OpPrereqError("Cluster does not support lvm-based"
847 " instances", errors.ECODE_STATE)
848
849 if (self.op.hypervisor is None or
850 self.op.hypervisor == constants.VALUE_AUTO):
851 self.op.hypervisor = self.cfg.GetHypervisorType()
852
853 cluster = self.cfg.GetClusterInfo()
854 enabled_hvs = cluster.enabled_hypervisors
855 if self.op.hypervisor not in enabled_hvs:
856 raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
857 " cluster (%s)" %
858 (self.op.hypervisor, ",".join(enabled_hvs)),
859 errors.ECODE_STATE)
860
861 # Check tag validity
862 for tag in self.op.tags:
863 objects.TaggableObject.ValidateTag(tag)
864
865 # check hypervisor parameter syntax (locally)
866 utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
867 filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
868 self.op.hvparams)
869 hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
870 hv_type.CheckParameterSyntax(filled_hvp)
871 self.hv_full = filled_hvp
872 # check that we don't specify global parameters on an instance
873 CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
874 "instance", "cluster")
875
876 # fill and remember the beparams dict
877 self.be_full = _ComputeFullBeParams(self.op, cluster)
878
879 # build os parameters
880 self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)
881
882 # now that hvp/bep are in final format, let's reset to defaults,
883 # if told to do so
884 if self.op.identify_defaults:
885 self._RevertToDefaults(cluster)
886
887 # NIC buildup
888 self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
889 self.proc.GetECId())
890
891 # disk checks/pre-build
892 default_vg = self.cfg.GetVGName()
893 self.disks = ComputeDisks(self.op, default_vg)
894
895 if self.op.mode == constants.INSTANCE_IMPORT:
896 disk_images = []
897 for idx in range(len(self.disks)):
898 option = "disk%d_dump" % idx
899 if export_info.has_option(constants.INISECT_INS, option):
900 # FIXME: are the old os-es, disk sizes, etc. useful?
901 export_name = export_info.get(constants.INISECT_INS, option)
902 image = utils.PathJoin(self.op.src_path, export_name)
903 disk_images.append(image)
904 else:
905 disk_images.append(False)
906
907 self.src_images = disk_images
908
909 if self.op.instance_name == self._old_instance_name:
910 for idx, nic in enumerate(self.nics):
911 if nic.mac == constants.VALUE_AUTO:
912 nic_mac_ini = "nic%d_mac" % idx
913 nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
914
915 # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
916
917 # ip ping checks (we use the same ip that was resolved in ExpandNames)
918 if self.op.ip_check:
919 if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
920 raise errors.OpPrereqError("IP %s of instance %s already in use" %
921 (self.check_ip, self.op.instance_name),
922 errors.ECODE_NOTUNIQUE)
923
924 #### mac address generation
925     # By generating the MAC address here, both the allocator and the hooks get
926     # the real, final MAC address rather than the 'auto' or 'generate' value.
927 # There is a race condition between the generation and the instance object
928 # creation, which means that we know the mac is valid now, but we're not
929 # sure it will be when we actually add the instance. If things go bad
930 # adding the instance will abort because of a duplicate mac, and the
931 # creation job will fail.
932 for nic in self.nics:
933 if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
934 nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())
935
936 #### allocator run
937
938 if self.op.iallocator is not None:
939 self._RunAllocator()
940
941 # Release all unneeded node locks
942 keep_locks = filter(None, [self.op.pnode, self.op.snode, self.op.src_node])
943 ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
944 ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
945 ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
946
947 assert (self.owned_locks(locking.LEVEL_NODE) ==
948 self.owned_locks(locking.LEVEL_NODE_RES)), \
949 "Node locks differ from node resource locks"
950
951 #### node related checks
952
953 # check primary node
954 self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
955 assert self.pnode is not None, \
956 "Cannot retrieve locked node %s" % self.op.pnode
957 if pnode.offline:
958 raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
959 pnode.name, errors.ECODE_STATE)
960 if pnode.drained:
961 raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
962 pnode.name, errors.ECODE_STATE)
963 if not pnode.vm_capable:
964 raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
965 " '%s'" % pnode.name, errors.ECODE_STATE)
966
967 self.secondaries = []
968
969 # Fill in any IPs from IP pools. This must happen here, because we need to
970 # know the nic's primary node, as specified by the iallocator
971 for idx, nic in enumerate(self.nics):
972 net_uuid = nic.network
973 if net_uuid is not None:
974 nobj = self.cfg.GetNetwork(net_uuid)
975 netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.name)
976 if netparams is None:
977 raise errors.OpPrereqError("No netparams found for network"
978 " %s. Propably not connected to"
979 " node's %s nodegroup" %
980 (nobj.name, self.pnode.name),
981 errors.ECODE_INVAL)
982 self.LogInfo("NIC/%d inherits netparams %s" %
983 (idx, netparams.values()))
984 nic.nicparams = dict(netparams)
985 if nic.ip is not None:
986 if nic.ip.lower() == constants.NIC_IP_POOL:
987 try:
988 nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
989 except errors.ReservationError:
990 raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
991 " from the address pool" % idx,
992 errors.ECODE_STATE)
993 self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
994 else:
995 try:
996 self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId())
997 except errors.ReservationError:
998 raise errors.OpPrereqError("IP address %s already in use"
999 " or does not belong to network %s" %
1000 (nic.ip, nobj.name),
1001 errors.ECODE_NOTUNIQUE)
1002
1003 # net is None, ip None or given
1004 elif self.op.conflicts_check:
1005 _CheckForConflictingIp(self, nic.ip, self.pnode.name)
1006
1007 # mirror node verification
1008 if self.op.disk_template in constants.DTS_INT_MIRROR:
1009 if self.op.snode == pnode.name:
1010 raise errors.OpPrereqError("The secondary node cannot be the"
1011 " primary node", errors.ECODE_INVAL)
1012 CheckNodeOnline(self, self.op.snode)
1013 CheckNodeNotDrained(self, self.op.snode)
1014 CheckNodeVmCapable(self, self.op.snode)
1015 self.secondaries.append(self.op.snode)
1016
1017 snode = self.cfg.GetNodeInfo(self.op.snode)
1018 if pnode.group != snode.group:
1019 self.LogWarning("The primary and secondary nodes are in two"
1020 " different node groups; the disk parameters"
1021 " from the first disk's node group will be"
1022 " used")
1023
1024 if not self.op.disk_template in constants.DTS_EXCL_STORAGE:
1025 nodes = [pnode]
1026 if self.op.disk_template in constants.DTS_INT_MIRROR:
1027 nodes.append(snode)
1028 has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
1029 if compat.any(map(has_es, nodes)):
1030 raise errors.OpPrereqError("Disk template %s not supported with"
1031 " exclusive storage" % self.op.disk_template,
1032 errors.ECODE_STATE)
1033
1034 nodenames = [pnode.name] + self.secondaries
1035
1036 if not self.adopt_disks:
1037 if self.op.disk_template == constants.DT_RBD:
1038 # _CheckRADOSFreeSpace() is just a placeholder.
1039 # Any function that checks prerequisites can be placed here.
1040 # Check if there is enough space on the RADOS cluster.
1041 CheckRADOSFreeSpace()
1042 elif self.op.disk_template == constants.DT_EXT:
1043 # FIXME: Function that checks prereqs if needed
1044 pass
1045 else:
1046 # Check lv size requirements, if not adopting
1047 req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
1048 CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)
1049
1050 elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
1051 all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
1052 disk[constants.IDISK_ADOPT])
1053 for disk in self.disks])
1054 if len(all_lvs) != len(self.disks):
1055 raise errors.OpPrereqError("Duplicate volume names given for adoption",
1056 errors.ECODE_INVAL)
1057 for lv_name in all_lvs:
1058 try:
1059           # FIXME: lv_name here is "vg/lv"; need to ensure that other calls
1060           # to ReserveLV use the same syntax
1061 self.cfg.ReserveLV(lv_name, self.proc.GetECId())
1062 except errors.ReservationError:
1063 raise errors.OpPrereqError("LV named %s used by another instance" %
1064 lv_name, errors.ECODE_NOTUNIQUE)
1065
1066 vg_names = self.rpc.call_vg_list([pnode.name])[pnode.name]
1067 vg_names.Raise("Cannot get VG information from node %s" % pnode.name)
1068
1069 node_lvs = self.rpc.call_lv_list([pnode.name],
1070 vg_names.payload.keys())[pnode.name]
1071 node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
1072 node_lvs = node_lvs.payload
1073
1074 delta = all_lvs.difference(node_lvs.keys())
1075 if delta:
1076 raise errors.OpPrereqError("Missing logical volume(s): %s" %
1077 utils.CommaJoin(delta),
1078 errors.ECODE_INVAL)
1079 online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
1080 if online_lvs:
1081 raise errors.OpPrereqError("Online logical volumes found, cannot"
1082 " adopt: %s" % utils.CommaJoin(online_lvs),
1083 errors.ECODE_STATE)
1084 # update the size of disk based on what is found
1085 for dsk in self.disks:
1086 dsk[constants.IDISK_SIZE] = \
1087 int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
1088 dsk[constants.IDISK_ADOPT])][0]))
1089
1090 elif self.op.disk_template == constants.DT_BLOCK:
1091 # Normalize and de-duplicate device paths
1092 all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
1093 for disk in self.disks])
1094 if len(all_disks) != len(self.disks):
1095 raise errors.OpPrereqError("Duplicate disk names given for adoption",
1096 errors.ECODE_INVAL)
1097 baddisks = [d for d in all_disks
1098 if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
1099 if baddisks:
1100 raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
1101 " cannot be adopted" %
1102 (utils.CommaJoin(baddisks),
1103 constants.ADOPTABLE_BLOCKDEV_ROOT),
1104 errors.ECODE_INVAL)
1105
1106 node_disks = self.rpc.call_bdev_sizes([pnode.name],
1107 list(all_disks))[pnode.name]
1108 node_disks.Raise("Cannot get block device information from node %s" %
1109 pnode.name)
1110 node_disks = node_disks.payload
1111 delta = all_disks.difference(node_disks.keys())
1112 if delta:
1113 raise errors.OpPrereqError("Missing block device(s): %s" %
1114 utils.CommaJoin(delta),
1115 errors.ECODE_INVAL)
1116 for dsk in self.disks:
1117 dsk[constants.IDISK_SIZE] = \
1118 int(float(node_disks[dsk[constants.IDISK_ADOPT]]))
1119
1120 # Verify instance specs
1121 spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
1122 ispec = {
1123 constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
1124 constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
1125 constants.ISPEC_DISK_COUNT: len(self.disks),
1126 constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
1127 for disk in self.disks],
1128 constants.ISPEC_NIC_COUNT: len(self.nics),
1129 constants.ISPEC_SPINDLE_USE: spindle_use,
1130 }
1131
1132 group_info = self.cfg.GetNodeGroup(pnode.group)
1133 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1134 res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
1135 self.op.disk_template)
1136 if not self.op.ignore_ipolicy and res:
1137 msg = ("Instance allocation to group %s (%s) violates policy: %s" %
1138 (pnode.group, group_info.name, utils.CommaJoin(res)))
1139 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
1140
1141 CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
1142
1143 CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
1144 # check OS parameters (remotely)
1145 CheckOSParams(self, True, nodenames, self.op.os_type, self.os_full)
1146
1147 CheckNicsBridgesExist(self, self.nics, self.pnode.name)
1148
1149 #TODO: _CheckExtParams (remotely)
1150 # Check parameters for extstorage
1151
1152 # memory check on primary node
1153 #TODO(dynmem): use MINMEM for checking
1154 if self.op.start:
1155 CheckNodeFreeMemory(self, self.pnode.name,
1156 "creating instance %s" % self.op.instance_name,
1157 self.be_full[constants.BE_MAXMEM],
1158 self.op.hypervisor)
1159
1160 self.dry_run_result = list(nodenames)
1161
1162 def Exec(self, feedback_fn):
1163 """Create and add the instance to the cluster.
1164
1165 """
1166 instance = self.op.instance_name
1167 pnode_name = self.pnode.name
1168
1169 assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
1170 self.owned_locks(locking.LEVEL_NODE)), \
1171 "Node locks differ from node resource locks"
1172 assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)
1173
1174 ht_kind = self.op.hypervisor
1175 if ht_kind in constants.HTS_REQ_PORT:
1176 network_port = self.cfg.AllocatePort()
1177 else:
1178 network_port = None
1179
1180     # This is ugly, but we have a chicken-and-egg problem here
1181 # We can only take the group disk parameters, as the instance
1182 # has no disks yet (we are generating them right here).
1183 node = self.cfg.GetNodeInfo(pnode_name)
1184 nodegroup = self.cfg.GetNodeGroup(node.group)
1185 disks = GenerateDiskTemplate(self,
1186 self.op.disk_template,
1187 instance, pnode_name,
1188 self.secondaries,
1189 self.disks,
1190 self.instance_file_storage_dir,
1191 self.op.file_driver,
1192 0,
1193 feedback_fn,
1194 self.cfg.GetGroupDiskParams(nodegroup))
1195
1196 iobj = objects.Instance(name=instance, os=self.op.os_type,
1197 primary_node=pnode_name,
1198 nics=self.nics, disks=disks,
1199 disk_template=self.op.disk_template,
1200 admin_state=constants.ADMINST_DOWN,
1201 network_port=network_port,
1202 beparams=self.op.beparams,
1203 hvparams=self.op.hvparams,
1204 hypervisor=self.op.hypervisor,
1205 osparams=self.op.osparams,
1206 )
1207
1208 if self.op.tags:
1209 for tag in self.op.tags:
1210 iobj.AddTag(tag)
1211
1212 if self.adopt_disks:
1213 if self.op.disk_template == constants.DT_PLAIN:
1214 # rename LVs to the newly-generated names; we need to construct
1215 # 'fake' LV disks with the old data, plus the new unique_id
1216 tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
1217 rename_to = []
1218 for t_dsk, a_dsk in zip(tmp_disks, self.disks):
1219 rename_to.append(t_dsk.logical_id)
1220 t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
1221 self.cfg.SetDiskID(t_dsk, pnode_name)
1222 result = self.rpc.call_blockdev_rename(pnode_name,
1223 zip(tmp_disks, rename_to))
1224 result.Raise("Failed to rename adoped LVs")
1225 else:
1226 feedback_fn("* creating instance disks...")
1227 try:
1228 CreateDisks(self, iobj)
1229 except errors.OpExecError:
1230 self.LogWarning("Device creation failed")
1231 self.cfg.ReleaseDRBDMinors(instance)
1232 raise
1233
1234 feedback_fn("adding instance %s to cluster config" % instance)
1235
1236 self.cfg.AddInstance(iobj, self.proc.GetECId())
1237
1238 # Declare that we don't want to remove the instance lock anymore, as we've
1239 # added the instance to the config
1240 del self.remove_locks[locking.LEVEL_INSTANCE]
1241
1242 if self.op.mode == constants.INSTANCE_IMPORT:
1243 # Release unused nodes
1244 ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node])
1245 else:
1246 # Release all nodes
1247 ReleaseLocks(self, locking.LEVEL_NODE)
1248
1249 disk_abort = False
1250 if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
1251 feedback_fn("* wiping instance disks...")
1252 try:
1253 WipeDisks(self, iobj)
1254 except errors.OpExecError, err:
1255 logging.exception("Wiping disks failed")
1256 self.LogWarning("Wiping instance disks failed (%s)", err)
1257 disk_abort = True
1258
1259 if disk_abort:
1260 # Something is already wrong with the disks, don't do anything else
1261 pass
1262 elif self.op.wait_for_sync:
1263 disk_abort = not WaitForSync(self, iobj)
1264 elif iobj.disk_template in constants.DTS_INT_MIRROR:
1265 # make sure the disks are not degraded (still sync-ing is ok)
1266 feedback_fn("* checking mirrors status")
1267 disk_abort = not WaitForSync(self, iobj, oneshot=True)
1268 else:
1269 disk_abort = False
1270
1271 if disk_abort:
1272 RemoveDisks(self, iobj)
1273 self.cfg.RemoveInstance(iobj.name)
1274 # Make sure the instance lock gets removed
1275 self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
1276 raise errors.OpExecError("There are some degraded disks for"
1277 " this instance")
1278
1279 # Release all node resource locks
1280 ReleaseLocks(self, locking.LEVEL_NODE_RES)
1281
1282 if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
1283       # we need to set the disks' ID to the primary node, since the
1284       # preceding code might or might not have done it, depending on
1285 # disk template and other options
1286 for disk in iobj.disks:
1287 self.cfg.SetDiskID(disk, pnode_name)
1288 if self.op.mode == constants.INSTANCE_CREATE:
1289 if not self.op.no_install:
1290 pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
1291 not self.op.wait_for_sync)
1292 if pause_sync:
1293 feedback_fn("* pausing disk sync to install instance OS")
1294 result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
1295 (iobj.disks,
1296 iobj), True)
1297 for idx, success in enumerate(result.payload):
1298 if not success:
1299 logging.warn("pause-sync of instance %s for disk %d failed",
1300 instance, idx)
1301
1302 feedback_fn("* running the instance OS create scripts...")
1303 # FIXME: pass debug option from opcode to backend
1304 os_add_result = \
1305 self.rpc.call_instance_os_add(pnode_name, (iobj, None), False,
1306 self.op.debug_level)
1307 if pause_sync:
1308 feedback_fn("* resuming disk sync")
1309 result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
1310 (iobj.disks,
1311 iobj), False)
1312 for idx, success in enumerate(result.payload):
1313 if not success:
1314 logging.warn("resume-sync of instance %s for disk %d failed",
1315 instance, idx)
1316
1317 os_add_result.Raise("Could not add os for instance %s"
1318 " on node %s" % (instance, pnode_name))
1319
1320 else:
1321 if self.op.mode == constants.INSTANCE_IMPORT:
1322 feedback_fn("* running the instance OS import scripts...")
1323
1324 transfers = []
1325
1326 for idx, image in enumerate(self.src_images):
1327 if not image:
1328 continue
1329
1330 # FIXME: pass debug option from opcode to backend
1331 dt = masterd.instance.DiskTransfer("disk/%s" % idx,
1332 constants.IEIO_FILE, (image, ),
1333 constants.IEIO_SCRIPT,
1334 (iobj.disks[idx], idx),
1335 None)
1336 transfers.append(dt)
1337
1338 import_result = \
1339 masterd.instance.TransferInstanceData(self, feedback_fn,
1340 self.op.src_node, pnode_name,
1341 self.pnode.secondary_ip,
1342 iobj, transfers)
1343 if not compat.all(import_result):
1344 self.LogWarning("Some disks for instance %s on node %s were not"
1345 " imported successfully" % (instance, pnode_name))
1346
1347 rename_from = self._old_instance_name
1348
1349 elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
1350 feedback_fn("* preparing remote import...")
1351 # The source cluster will stop the instance before attempting to make
1352 # a connection. In some cases stopping an instance can take a long
1353 # time, hence the shutdown timeout is added to the connection
1354 # timeout.
1355 connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
1356 self.op.source_shutdown_timeout)
1357 timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
1358
1359 assert iobj.primary_node == self.pnode.name
1360 disk_results = \
1361 masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
1362 self.source_x509_ca,
1363 self._cds, timeouts)
1364 if not compat.all(disk_results):
1365 # TODO: Should the instance still be started, even if some disks
1366 # failed to import (valid for local imports, too)?
1367 self.LogWarning("Some disks for instance %s on node %s were not"
1368 " imported successfully" % (instance, pnode_name))
1369
1370 rename_from = self.source_instance_name
1371
1372 else:
1373 # also checked in the prereq part
1374 raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
1375 % self.op.mode)
1376
1377 # Run rename script on newly imported instance
1378 assert iobj.name == instance
1379 feedback_fn("Running rename script for %s" % instance)
1380 result = self.rpc.call_instance_run_rename(pnode_name, iobj,
1381 rename_from,
1382 self.op.debug_level)
1383 if result.fail_msg:
1384 self.LogWarning("Failed to run rename script for %s on node"
1385 " %s: %s" % (instance, pnode_name, result.fail_msg))
1386
1387 assert not self.owned_locks(locking.LEVEL_NODE_RES)
1388
1389 if self.op.start:
1390 iobj.admin_state = constants.ADMINST_UP
1391 self.cfg.Update(iobj, feedback_fn)
1392 logging.info("Starting instance %s on node %s", instance, pnode_name)
1393 feedback_fn("* starting instance...")
1394 result = self.rpc.call_instance_start(pnode_name, (iobj, None, None),
1395 False, self.op.reason)
1396 result.Raise("Could not start instance")
1397
1398 return list(iobj.all_nodes)
1399
1400
1401 class LUInstanceRename(LogicalUnit):
1402 """Rename an instance.
1403
1404 """
1405 HPATH = "instance-rename"
1406 HTYPE = constants.HTYPE_INSTANCE
1407
1408 def CheckArguments(self):
1409 """Check arguments.
1410
1411 """
1412 if self.op.ip_check and not self.op.name_check:
1413 # TODO: make the ip check more flexible and not depend on the name check
1414 raise errors.OpPrereqError("IP address check requires a name check",
1415 errors.ECODE_INVAL)
1416
1417 def BuildHooksEnv(self):
1418 """Build hooks env.
1419
1420 This runs on master, primary and secondary nodes of the instance.
1421
1422 """
1423 env = BuildInstanceHookEnvByObject(self, self.instance)
1424 env["INSTANCE_NEW_NAME"] = self.op.new_name
1425 return env
1426
1427 def BuildHooksNodes(self):
1428 """Build hooks nodes.
1429
1430 """
1431 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
1432 return (nl, nl)
1433
1434 def CheckPrereq(self):
1435 """Check prerequisites.
1436
1437 This checks that the instance is in the cluster and is not running.
1438
1439 """
1440 self.op.instance_name = ExpandInstanceName(self.cfg,
1441 self.op.instance_name)
1442 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
1443 assert instance is not None
1444 CheckNodeOnline(self, instance.primary_node)
1445 CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
1446 msg="cannot rename")
1447 self.instance = instance
1448
1449 new_name = self.op.new_name
1450 if self.op.name_check:
1451 hostname = _CheckHostnameSane(self, new_name)
1452 new_name = self.op.new_name = hostname.name
1453 if (self.op.ip_check and
1454 netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
1455 raise errors.OpPrereqError("IP %s of instance %s already in use" %
1456 (hostname.ip, new_name),
1457 errors.ECODE_NOTUNIQUE)
1458
1459 instance_list = self.cfg.GetInstanceList()
1460 if new_name in instance_list and new_name != instance.name:
1461 raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
1462 new_name, errors.ECODE_EXISTS)
1463
1464 def Exec(self, feedback_fn):
1465 """Rename the instance.
1466
1467 """
1468 inst = self.instance
1469 old_name = inst.name
1470
1471 rename_file_storage = False
1472 if (inst.disk_template in constants.DTS_FILEBASED and
1473 self.op.new_name != inst.name):
1474 old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
1475 rename_file_storage = True
1476
1477 self.cfg.RenameInstance(inst.name, self.op.new_name)
1478 # Change the instance lock. This is definitely safe while we hold the BGL.
1479 # Otherwise the new lock would have to be added in acquired mode.
1480 assert self.REQ_BGL
1481 assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
1482 self.glm.remove(locking.LEVEL_INSTANCE, old_name)
1483 self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
1484
1485 # re-read the instance from the configuration after rename
1486 inst = self.cfg.GetInstanceInfo(self.op.new_name)
1487
1488 if rename_file_storage:
1489 new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
1490 result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
1491 old_file_storage_dir,
1492 new_file_storage_dir)
1493 result.Raise("Could not rename on node %s directory '%s' to '%s'"
1494 " (but the instance has been renamed in Ganeti)" %
1495 (inst.primary_node, old_file_storage_dir,
1496 new_file_storage_dir))
1497
1498 StartInstanceDisks(self, inst, None)
1499 # update info on disks
1500 info = GetInstanceInfoText(inst)
1501 for (idx, disk) in enumerate(inst.disks):
1502 for node in inst.all_nodes:
1503 self.cfg.SetDiskID(disk, node)
1504 result = self.rpc.call_blockdev_setinfo(node, disk, info)
1505 if result.fail_msg:
1506 self.LogWarning("Error setting info on node %s for disk %s: %s",
1507 node, idx, result.fail_msg)
1508 try:
1509 result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
1510 old_name, self.op.debug_level)
1511 msg = result.fail_msg
1512 if msg:
1513 msg = ("Could not run OS rename script for instance %s on node %s"
1514 " (but the instance has been renamed in Ganeti): %s" %
1515 (inst.name, inst.primary_node, msg))
1516 self.LogWarning(msg)
1517 finally:
1518 ShutdownInstanceDisks(self, inst)
1519
1520 return inst.name
1521
1522
1523 class LUInstanceRemove(LogicalUnit):
1524 """Remove an instance.
1525
1526 """
1527 HPATH = "instance-remove"
1528 HTYPE = constants.HTYPE_INSTANCE
1529 REQ_BGL = False
1530
1531 def ExpandNames(self):
1532 self._ExpandAndLockInstance()
1533 self.needed_locks[locking.LEVEL_NODE] = []
1534 self.needed_locks[locking.LEVEL_NODE_RES] = []
1535 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
1536
1537 def DeclareLocks(self, level):
1538 if level == locking.LEVEL_NODE:
1539 self._LockInstancesNodes()
1540 elif level == locking.LEVEL_NODE_RES:
1541 # Copy node locks
1542 self.needed_locks[locking.LEVEL_NODE_RES] = \
1543 CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1544
1545 def BuildHooksEnv(self):
1546 """Build hooks env.
1547
1548 This runs on master, primary and secondary nodes of the instance.
1549
1550 """
1551 env = BuildInstanceHookEnvByObject(self, self.instance)
1552 env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
1553 return env
1554
1555 def BuildHooksNodes(self):
1556 """Build hooks nodes.
1557
1558 """
1559 nl = [self.cfg.GetMasterNode()]
1560 nl_post = list(self.instance.all_nodes) + nl
1561 return (nl, nl_post)
1562
1563 def CheckPrereq(self):
1564 """Check prerequisites.
1565
1566 This checks that the instance is in the cluster.
1567
1568 """
1569 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
1570 assert self.instance is not None, \
1571 "Cannot retrieve locked instance %s" % self.op.instance_name
1572
1573 def Exec(self, feedback_fn):
1574 """Remove the instance.
1575
1576 """
1577 instance = self.instance
1578 logging.info("Shutting down instance %s on node %s",
1579 instance.name, instance.primary_node)
1580
1581 result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
1582 self.op.shutdown_timeout,
1583 self.op.reason)
1584 msg = result.fail_msg
1585 if msg:
1586 if self.op.ignore_failures:
1587 feedback_fn("Warning: can't shutdown instance: %s" % msg)
1588 else:
1589 raise errors.OpExecError("Could not shutdown instance %s on"
1590 " node %s: %s" %
1591 (instance.name, instance.primary_node, msg))
1592
1593 assert (self.owned_locks(locking.LEVEL_NODE) ==
1594 self.owned_locks(locking.LEVEL_NODE_RES))
1595 assert not (set(instance.all_nodes) -
1596 self.owned_locks(locking.LEVEL_NODE)), \
1597 "Not owning correct locks"
1598
1599 RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)
1600
1601
1602 class LUInstanceMove(LogicalUnit):
1603 """Move an instance by data-copying.
1604
1605 """
1606 HPATH = "instance-move"
1607 HTYPE = constants.HTYPE_INSTANCE
1608 REQ_BGL = False
1609
1610 def ExpandNames(self):
1611 self._ExpandAndLockInstance()
1612 target_node = ExpandNodeName(self.cfg, self.op.target_node)
1613 self.op.target_node = target_node
1614 self.needed_locks[locking.LEVEL_NODE] = [target_node]
1615 self.needed_locks[locking.LEVEL_NODE_RES] = []
1616 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
1617
1618 def DeclareLocks(self, level):
1619 if level == locking.LEVEL_NODE:
1620 self._LockInstancesNodes(primary_only=True)
1621 elif level == locking.LEVEL_NODE_RES:
1622 # Copy node locks
1623 self.needed_locks[locking.LEVEL_NODE_RES] = \
1624 CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1625
1626 def BuildHooksEnv(self):
1627 """Build hooks env.
1628
1629 This runs on master, primary and secondary nodes of the instance.
1630
1631 """
1632 env = {
1633 "TARGET_NODE": self.op.target_node,
1634 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
1635 }
1636 env.update(BuildInstanceHookEnvByObject(self, self.instance))
1637 return env
1638
1639 def BuildHooksNodes(self):
1640 """Build hooks nodes.
1641
1642 """
1643 nl = [
1644 self.cfg.GetMasterNode(),
1645 self.instance.primary_node,
1646 self.op.target_node,
1647 ]
1648 return (nl, nl)
1649
1650 def CheckPrereq(self):
1651 """Check prerequisites.
1652
1653 This checks that the instance is in the cluster.
1654
1655 """
1656 self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
1657 assert self.instance is not None, \
1658 "Cannot retrieve locked instance %s" % self.op.instance_name
1659
1660 if instance.disk_template not in constants.DTS_COPYABLE:
1661 raise errors.OpPrereqError("Disk template %s not suitable for copying" %
1662 instance.disk_template, errors.ECODE_STATE)
1663
1664 node = self.cfg.GetNodeInfo(self.op.target_node)
1665 assert node is not None, \
1666 "Cannot retrieve locked node %s" % self.op.target_node
1667
1668 self.target_node = target_node = node.name
1669
1670 if target_node == instance.primary_node:
1671 raise errors.OpPrereqError("Instance %s is already on the node %s" %
1672 (instance.name, target_node),
1673 errors.ECODE_STATE)
1674
1675 bep = self.cfg.GetClusterInfo().FillBE(instance)
1676
1677 for idx, dsk in enumerate(instance.disks):
1678 if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
1679 raise errors.OpPrereqError("Instance disk %d has a complex layout,"
1680 " cannot copy" % idx, errors.ECODE_STATE)
1681
1682 CheckNodeOnline(self, target_node)
1683 CheckNodeNotDrained(self, target_node)
1684 CheckNodeVmCapable(self, target_node)
1685 cluster = self.cfg.GetClusterInfo()
1686 group_info = self.cfg.GetNodeGroup(node.group)
1687 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1688 CheckTargetNodeIPolicy(self, ipolicy, instance, node, self.cfg,
1689 ignore=self.op.ignore_ipolicy)
1690
1691 if instance.admin_state == constants.ADMINST_UP:
1692 # check memory requirements on the target node
1693 CheckNodeFreeMemory(self, target_node,
1694 "failing over instance %s" %
1695 instance.name, bep[constants.BE_MAXMEM],
1696 instance.hypervisor)
1697 else:
1698 self.LogInfo("Not checking memory on the target node as"
1699 " instance will not be started")
1700
1701 # check bridge existence
1702 CheckInstanceBridgesExist(self, instance, node=target_node)
1703
1704 def Exec(self, feedback_fn):
1705 """Move an instance.
1706
1707 The move is done by shutting it down on its present node, copying
1708 the data over (slow) and starting it on the new node.
1709
1710 """
1711 instance = self.instance
1712
1713 source_node = instance.primary_node
1714 target_node = self.target_node
1715
1716 self.LogInfo("Shutting down instance %s on source node %s",
1717 instance.name, source_node)
1718
1719 assert (self.owned_locks(locking.LEVEL_NODE) ==
1720 self.owned_locks(locking.LEVEL_NODE_RES))
1721
1722 result = self.rpc.call_instance_shutdown(source_node, instance,
1723 self.op.shutdown_timeout,
1724 self.op.reason)
1725 msg = result.fail_msg
1726 if msg:
1727 if self.op.ignore_consistency:
1728 self.LogWarning("Could not shutdown instance %s on node %s."
1729 " Proceeding anyway. Please make sure node"
1730 " %s is down. Error details: %s",
1731 instance.name, source_node, source_node, msg)
1732 else:
1733 raise errors.OpExecError("Could not shutdown instance %s on"
1734 " node %s: %s" %
1735 (instance.name, source_node, msg))
1736
1737 # create the target disks
1738 try:
1739 CreateDisks(self, instance, target_node=target_node)
1740 except errors.OpExecError:
1741 self.LogWarning("Device creation failed")
1742 self.cfg.ReleaseDRBDMinors(instance.name)
1743 raise
1744
1745 cluster_name = self.cfg.GetClusterInfo().cluster_name
1746
1747 errs = []
1748 # activate, get path, copy the data over
1749 for idx, disk in enumerate(instance.disks):
1750 self.LogInfo("Copying data for disk %d", idx)
1751 result = self.rpc.call_blockdev_assemble(target_node, (disk, instance),
1752 instance.name, True, idx)
1753 if result.fail_msg:
1754 self.LogWarning("Can't assemble newly created disk %d: %s",
1755 idx, result.fail_msg)
1756 errs.append(result.fail_msg)
1757 break
1758 dev_path = result.payload
1759 result = self.rpc.call_blockdev_export(source_node, (disk, instance),
1760 target_node, dev_path,
1761 cluster_name)
1762 if result.fail_msg:
1763 self.LogWarning("Can't copy data over for disk %d: %s",
1764 idx, result.fail_msg)
1765 errs.append(result.fail_msg)
1766 break
1767
1768 if errs:
1769 self.LogWarning("Some disks failed to copy, aborting")
1770 try:
1771 RemoveDisks(self, instance, target_node=target_node)
1772 finally:
1773 self.cfg.ReleaseDRBDMinors(instance.name)
1774 raise errors.OpExecError("Errors during disk copy: %s" %
1775 (",".join(errs),))
1776
1777 instance.primary_node = target_node
1778 self.cfg.Update(instance, feedback_fn)
1779
1780 self.LogInfo("Removing the disks on the original node")
1781 RemoveDisks(self, instance, target_node=source_node)
1782
1783 # Only start the instance if it's marked as up
1784 if instance.admin_state == constants.ADMINST_UP:
1785 self.LogInfo("Starting instance %s on node %s",
1786 instance.name, target_node)
1787
1788 disks_ok, _ = AssembleInstanceDisks(self, instance,
1789 ignore_secondaries=True)
1790 if not disks_ok:
1791 ShutdownInstanceDisks(self, instance)
1792 raise errors.OpExecError("Can't activate the instance's disks")
1793
1794 result = self.rpc.call_instance_start(target_node,
1795 (instance, None, None), False,
1796 self.op.reason)
1797 msg = result.fail_msg
1798 if msg:
1799 ShutdownInstanceDisks(self, instance)
1800 raise errors.OpExecError("Could not start instance %s on node %s: %s" %
1801 (instance.name, target_node, msg))
1802
1803
1804 class LUInstanceMultiAlloc(NoHooksLU):
1805 """Allocates multiple instances at the same time.
1806
1807 """
1808 REQ_BGL = False
1809
1810 def CheckArguments(self):
1811 """Check arguments.
1812
1813 """
1814 nodes = []
1815 for inst in self.op.instances:
1816 if inst.iallocator is not None:
1817 raise errors.OpPrereqError("iallocator is not allowed to be set on"
1818 " instance objects", errors.ECODE_INVAL)
1819 nodes.append(bool(inst.pnode))
1820 if inst.disk_template in constants.DTS_INT_MIRROR:
1821 nodes.append(bool(inst.snode))
1822
1823 has_nodes = compat.any(nodes)
1824 if compat.all(nodes) ^ has_nodes:
1825 raise errors.OpPrereqError("There are instance objects providing"
1826 " pnode/snode while others do not",
1827 errors.ECODE_INVAL)
1828
1829 if self.op.iallocator is None:
1830 default_iallocator = self.cfg.GetDefaultIAllocator()
1831 if default_iallocator and has_nodes:
1832 self.op.iallocator = default_iallocator
1833 else:
1834 raise errors.OpPrereqError("No iallocator or nodes on the instances"
1835 " given and no cluster-wide default"
1836 " iallocator found; please specify either"
1837 " an iallocator or nodes on the instances"
1838 " or set a cluster-wide default iallocator",
1839 errors.ECODE_INVAL)
1840
1841 _CheckOpportunisticLocking(self.op)
1842
1843 dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
1844 if dups:
1845 raise errors.OpPrereqError("There are duplicate instance names: %s" %
1846 utils.CommaJoin(dups), errors.ECODE_INVAL)
1847
1848 def ExpandNames(self):
1849 """Calculate the locks.
1850
1851 """
1852 self.share_locks = ShareAll()
1853 self.needed_locks = {
1854 # iallocator will select nodes and even if no iallocator is used,
1855 # collisions with LUInstanceCreate should be avoided
1856 locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
1857 }
1858
1859 if self.op.iallocator:
1860 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
1861 self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
1862
1863 if self.op.opportunistic_locking:
1864 self.opportunistic_locks[locking.LEVEL_NODE] = True
1865 self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
1866 else:
1867 nodeslist = []
1868 for inst in self.op.instances:
1869 inst.pnode = ExpandNodeName(self.cfg, inst.pnode)
1870 nodeslist.append(inst.pnode)
1871 if inst.snode is not None:
1872 inst.snode = ExpandNodeName(self.cfg, inst.snode)
1873 nodeslist.append(inst.snode)
1874
1875 self.needed_locks[locking.LEVEL_NODE] = nodeslist
1876 # Lock resources of instance's primary and secondary nodes (copy to
1877 # prevent accidental modification)
1878 self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)
1879
1880 def CheckPrereq(self):
1881 """Check prerequisites.
1882
1883 """
1884 cluster = self.cfg.GetClusterInfo()
1885 default_vg = self.cfg.GetVGName()
1886 ec_id = self.proc.GetECId()
1887
1888 if self.op.opportunistic_locking:
1889 # Only consider nodes for which a lock is held
1890 node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
1891 else:
1892 node_whitelist = None
1893
1894 insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
1895 _ComputeNics(op, cluster, None,
1896 self.cfg, ec_id),
1897 _ComputeFullBeParams(op, cluster),
1898 node_whitelist)
1899 for op in self.op.instances]
1900
1901 req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
1902 ial = iallocator.IAllocator(self.cfg, self.rpc, req)
1903
1904 ial.Run(self.op.iallocator)
1905
1906 if not ial.success:
1907 raise errors.OpPrereqError("Can't compute nodes using"
1908 " iallocator '%s': %s" %
1909 (self.op.iallocator, ial.info),
1910 errors.ECODE_NORES)
1911
1912 self.ia_result = ial.result
1913
1914 if self.op.dry_run:
1915 self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
1916 constants.JOB_IDS_KEY: [],
1917 })
1918
1919 def _ConstructPartialResult(self):
1920 """Constructs the partial result.
1921
1922 """
1923 (allocatable, failed) = self.ia_result
1924 return {
1925 opcodes.OpInstanceMultiAlloc.ALLOCATABLE_KEY:
1926 map(compat.fst, allocatable),
1927 opcodes.OpInstanceMultiAlloc.FAILED_KEY: failed,
1928 }
1929
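# Illustrative result shape (sketch; the literal key strings are defined on
# opcodes.OpInstanceMultiAlloc):
#   {OpInstanceMultiAlloc.ALLOCATABLE_KEY: ["inst1.example.com"],
#    OpInstanceMultiAlloc.FAILED_KEY: ["inst2.example.com"]}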
1930 def Exec(self, feedback_fn):
1931 """Executes the opcode.
1932
1933 """
1934 op2inst = dict((op.instance_name, op) for op in self.op.instances)
1935 (allocatable, failed) = self.ia_result
1936
1937 jobs = []
1938 for (name, nodes) in allocatable:
1939 op = op2inst.pop(name)
1940
1941 if len(nodes) > 1:
1942 (op.pnode, op.snode) = nodes
1943 else:
1944 (op.pnode,) = nodes
1945
1946 jobs.append([op])
1947
1948 missing = set(op2inst.keys()) - set(failed)
1949 assert not missing, \
1950 "Iallocator returned an incomplete result: %s" % utils.CommaJoin(missing)
1951
1952 return ResultWithJobs(jobs, **self._ConstructPartialResult())
1953
1954
1955 class _InstNicModPrivate:
1956 """Data structure for network interface modifications.
1957
1958 Used by L{LUInstanceSetParams}.
1959
1960 """
1961 def __init__(self):
1962 self.params = None
1963 self.filled = None
1964
1965
1966 def _PrepareContainerMods(mods, private_fn):
1967 """Prepares a list of container modifications by adding a private data field.
1968
1969 @type mods: list of tuples; (operation, index, parameters)
1970 @param mods: List of modifications
1971 @type private_fn: callable or None
1972 @param private_fn: Callable for constructing a private data field for a
1973 modification
1974 @rtype: list
1975
1976 """
1977 if private_fn is None:
1978 fn = lambda: None
1979 else:
1980 fn = private_fn
1981
1982 return [(op, idx, params, fn()) for (op, idx, params) in mods]
1983
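# Illustrative behaviour (sketch): given mods = [(constants.DDM_ADD, -1, params)]
# and private_fn = _InstNicModPrivate, _PrepareContainerMods returns
# [(constants.DDM_ADD, -1, params, <fresh _InstNicModPrivate>)], i.e. each
# (op, index, params) tuple gains its own private data object (or None when
# private_fn is None).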
1984
1985 def _CheckNodesPhysicalCPUs(lu, nodenames, requested, hypervisor_name):
1986 """Checks if nodes have enough physical CPUs
1987
1988 This function checks if all given nodes have the needed number of
1989 physical CPUs. In case any node has less CPUs or we cannot get the
1990 information from the node, this function raises an OpPrereqError
1991 exception.
1992
1993 @type lu: C{LogicalUnit}
1994 @param lu: a logical unit from which we get configuration data
1995 @type nodenames: C{list}
1996 @param nodenames: the list of node names to check
1997 @type requested: C{int}
1998 @param requested: the minimum acceptable number of physical CPUs
1999 @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
2000 or we cannot check the node
2001
2002 """
2003 nodeinfo = lu.rpc.call_node_info(nodenames, None, [hypervisor_name], None)
2004 for node in nodenames:
2005 info = nodeinfo[node]
2006 info.Raise("Cannot get current information from node %s" % node,
2007 prereq=True, ecode=errors.ECODE_ENVIRON)
2008 (_, _, (hv_info, )) = info.payload
2009 num_cpus = hv_info.get("cpu_total", None)
2010 if not isinstance(num_cpus, int):
2011 raise errors.OpPrereqError("Can't compute the number of physical CPUs"
2012 " on node %s, result was '%s'" %
2013 (node, num_cpus), errors.ECODE_ENVIRON)
2014 if requested > num_cpus:
2015 raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
2016 "required" % (node, num_cpus, requested),
2017 errors.ECODE_NORES)
2018
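# Illustrative use (mirrors the call in LUInstanceSetParams.CheckPrereq below):
#   _CheckNodesPhysicalCPUs(self, instance.all_nodes, max_requested_cpu + 1,
#                           instance.hypervisor)
# i.e. every node of the instance must report at least max_requested_cpu + 1
# physical CPUs via "cpu_total" for the requested CPU mask to be usable.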
2019
2020 def GetItemFromContainer(identifier, kind, container):
2021 """Return the item referred to by the identifier.
2022
2023 @type identifier: string
2024 @param identifier: Item index or name or UUID
2025 @type kind: string
2026 @param kind: One-word item description
2027 @type container: list
2028 @param container: Container to get the item from
2029
2030 """
2031 # Index
2032 try:
2033 idx = int(identifier)
2034 if idx == -1:
2035 # -1 refers to the last item
2036 absidx = len(container) - 1
2037 elif idx < 0:
2038 raise IndexError("Not accepting negative indices other than -1")
2039 elif idx > len(container):
2040 raise IndexError("Got %s index %s, but there are only %s" %
2041 (kind, idx, len(container)))
2042 else:
2043 absidx = idx
2044 return (absidx, container[idx])
2045 except ValueError:
2046 pass
2047
2048 for idx, item in enumerate(container):
2049 if item.uuid == identifier or item.name == identifier:
2050 return (idx, item)
2051
2052 raise errors.OpPrereqError("Cannot find %s with identifier %s" %
2053 (kind, identifier), errors.ECODE_NOENT)
2054
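# Illustrative behaviour (sketch) for a container [nic0, nic1, nic2]:
#   GetItemFromContainer("1", "NIC", nics)    -> (1, nic1)   # by index
#   GetItemFromContainer("-1", "NIC", nics)   -> (2, nic2)   # last item
#   GetItemFromContainer("eth0", "NIC", nics) -> matches by item name or UUID
# An unknown identifier raises OpPrereqError with ECODE_NOENT.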
2055
2056 def _ApplyContainerMods(kind, container, chgdesc, mods,
2057 create_fn, modify_fn, remove_fn):
2058 """Applies descriptions in C{mods} to C{container}.
2059
2060 @type kind: string
2061 @param kind: One-word item description
2062 @type container: list
2063 @param container: Container to modify
2064 @type chgdesc: None or list
2065 @param chgdesc: List of applied changes
2066 @type mods: list
2067 @param mods: Modifications as returned by L{_PrepareContainerMods}
2068 @type create_fn: callable
2069 @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
2070 receives absolute item index, parameters and private data object as added
2071 by L{_PrepareContainerMods}, returns tuple containing new item and changes
2072 as list
2073 @type modify_fn: callable
2074 @param modify_fn: Callback for modifying an existing item
2075 (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
2076 and private data object as added by L{_PrepareContainerMods}, returns
2077 changes as list
2078 @type remove_fn: callable
2079 @param remove_fn: Callback on removing item; receives absolute item index,
2080 item and private data object as added by L{_PrepareContainerMods}
2081
2082 """
2083 for (op, identifier, params, private) in mods:
2084 changes = None
2085
2086 if op == constants.DDM_ADD:
2087 # Calculate where item will be added
2088 # When adding an item, identifier can only be an index
2089 try:
2090 idx = int(identifier)
2091 except ValueError:
2092 raise errors.OpPrereqError("Only a positive integer or -1 is accepted as"
2093 " identifier for %s" % constants.DDM_ADD,
2094 errors.ECODE_INVAL)
2095 if idx == -1:
2096 addidx = len(container)
2097 else:
2098 if idx < 0:
2099 raise IndexError("Not accepting negative indices other than -1")
2100 elif idx > len(container):
2101 raise IndexError("Got %s index %s, but there are only %s" %
2102 (kind, idx, len(container)))
2103 addidx = idx
2104
2105 if create_fn is None:
2106 item = params
2107 else:
2108 (item, changes) = create_fn(addidx, params, private)
2109
2110 if idx == -1:
2111 container.append(item)
2112 else:
2113 assert idx >= 0
2114 assert idx <= len(container)
2115 # list.insert does so before the specified index
2116 container.insert(idx, item)
2117 else:
2118 # Retrieve existing item
2119 (absidx, item) = GetItemFromContainer(identifier, kind, container)
2120
2121 if op == constants.DDM_REMOVE:
2122 assert not params
2123
2124 if remove_fn is not None:
2125 remove_fn(absidx, item, private)
2126
2127 changes = [("%s/%s" % (kind, absidx), "remove")]
2128
2129 assert container[absidx] == item
2130 del container[absidx]
2131 elif op == constants.DDM_MODIFY:
2132 if modify_fn is not None:
2133 changes = modify_fn(absidx, item, params, private)
2134 else:
2135 raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2136
2137 assert _TApplyContModsCbChanges(changes)
2138
2139 if not (chgdesc is None or changes is None):
2140 chgdesc.extend(changes)
2141
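# Illustrative call (as used by LUInstanceSetParams further below):
#   chgdesc = []
#   _ApplyContainerMods("NIC", nics, chgdesc, self.nicmod,
#                       self._CreateNewNic, self._ApplyNicMods, None)
# 'nics' is modified in place and 'chgdesc' collects change tuples such as
# ("NIC/0", "remove"), plus whatever the create/modify callbacks report.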
2142
2143 def _UpdateIvNames(base_index, disks):
2144 """Updates the C{iv_name} attribute of disks.
2145
2146 @type disks: list of L{objects.Disk}
2147
2148 """
2149 for (idx, disk) in enumerate(disks):
2150 disk.iv_name = "disk/%s" % (base_index + idx, )
2151
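# Illustrative effect: _UpdateIvNames(2, [d, e]) sets d.iv_name = "disk/2" and
# e.iv_name = "disk/3", keeping iv_name consistent with each disk's position.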
2152
2153 class LUInstanceSetParams(LogicalUnit):
2154 """Modifies an instance's parameters.
2155
2156 """
2157 HPATH = "instance-modify"
2158 HTYPE = constants.HTYPE_INSTANCE
2159 REQ_BGL = False
2160
2161 @staticmethod
2162 def _UpgradeDiskNicMods(kind, mods, verify_fn):
2163 assert ht.TList(mods)
2164 assert not mods or len(mods[0]) in (2, 3)
2165
2166 if mods and len(mods[0]) == 2:
2167 result = []
2168
2169 addremove = 0
2170 for op, params in mods:
2171 if op in (constants.DDM_ADD, constants.DDM_REMOVE):
2172 result.append((op, -1, params))
2173 addremove += 1
2174
2175 if addremove > 1:
2176 raise errors.OpPrereqError("Only one %s add or remove operation is"
2177 " supported at a time" % kind,
2178 errors.ECODE_INVAL)
2179 else:
2180 result.append((constants.DDM_MODIFY, op, params))
2181
2182 assert verify_fn(result)
2183 else:
2184 result = mods
2185
2186 return result
2187
2188 @staticmethod
2189 def _CheckMods(kind, mods, key_types, item_fn):
2190 """Ensures requested disk/NIC modifications are valid.
2191
2192 """
2193 for (op, _, params) in mods:
2194 assert ht.TDict(params)
2195
2196 # If 'key_types' is an empty dict, we assume we have an
2197 # 'ext' template and thus do not ForceDictType
2198 if key_types:
2199 utils.ForceDictType(params, key_types)
2200
2201 if op == constants.DDM_REMOVE:
2202 if params:
2203 raise errors.OpPrereqError("No settings should be passed when"
2204 " removing a %s" % kind,
2205 errors.ECODE_INVAL)
2206 elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
2207 item_fn(op, params)
2208 else:
2209 raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2210
2211 @staticmethod
2212 def _VerifyDiskModification(op, params):
2213 """Verifies a disk modification.
2214
2215 """
2216 if op == constants.DDM_ADD:
2217 mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
2218 if mode not in constants.DISK_ACCESS_SET:
2219 raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
2220 errors.ECODE_INVAL)
2221
2222 size = params.get(constants.IDISK_SIZE, None)
2223 if size is None:
2224 raise errors.OpPrereqError("Required disk parameter '%s' missing" %
2225 constants.IDISK_SIZE, errors.ECODE_INVAL)
2226
2227 try:
2228 size = int(size)
2229 except (TypeError, ValueError), err:
2230 raise errors.OpPrereqError("Invalid disk size parameter: %s" % err,
2231 errors.ECODE_INVAL)
2232
2233 params[constants.IDISK_SIZE] = size
2234 name = params.get(constants.IDISK_NAME, None)
2235 if name is not None and name.lower() == constants.VALUE_NONE:
2236 params[constants.IDISK_NAME] = None
2237
2238 elif op == constants.DDM_MODIFY:
2239 if constants.IDISK_SIZE in params:
2240 raise errors.OpPrereqError("Disk size change not possible, use"
2241 " grow-disk", errors.ECODE_INVAL)
2242 if len(params) > 2:
2243 raise errors.OpPrereqError("Disk modification doesn't support"
2244 " additional arbitrary parameters",
2245 errors.ECODE_INVAL)
2246 name = params.get(constants.IDISK_NAME, None)
2247 if name is not None and name.lower() == constants.VALUE_NONE:
2248 params[constants.IDISK_NAME] = None
2249
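# Illustrative parameter dicts accepted above (sketch; values are examples):
#   add:    {constants.IDISK_SIZE: 1024, constants.IDISK_MODE: constants.DISK_RDWR}
#   modify: {constants.IDISK_NAME: "data"}   # size changes must use grow-disk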
2250 @staticmethod
2251 def _VerifyNicModification(op, params):
2252 """Verifies a network interface modification.
2253
2254 """
2255 if op in (constants.DDM_ADD, constants.DDM_MODIFY):
2256 ip = params.get(constants.INIC_IP, None)
2257 name = params.get(constants.INIC_NAME, None)
2258 req_net = params.get(constants.INIC_NETWORK, None)
2259 link = params.get(constants.NIC_LINK, None)
2260 mode = params.get(constants.NIC_MODE, None)
2261 if name is not None and name.lower() == constants.VALUE_NONE:
2262 params[constants.INIC_NAME] = None
2263 if req_net is not None:
2264 if req_net.lower() == constants.VALUE_NONE:
2265 params[constants.INIC_NETWORK] = None
2266 req_net = None
2267 elif link is not None or mode is not None:
2268 raise errors.OpPrereqError("If a network is given, mode or link"
2269 " should not be set",
2270 errors.ECODE_INVAL)
2271
2272 if op == constants.DDM_ADD:
2273 macaddr = params.get(constants.INIC_MAC, None)
2274 if macaddr is None:
2275 params[constants.INIC_MAC] = constants.VALUE_AUTO
2276
2277 if ip is not None:
2278 if ip.lower() == constants.VALUE_NONE:
2279 params[constants.INIC_IP] = None
2280 else:
2281 if ip.lower() == constants.NIC_IP_POOL:
2282 if op == constants.DDM_ADD and req_net is None:
2283 raise errors.OpPrereqError("If ip=pool, parameter network"
2284 " cannot be none",
2285 errors.ECODE_INVAL)
2286 else:
2287 if not netutils.IPAddress.IsValid(ip):
2288 raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
2289 errors.ECODE_INVAL)
2290
2291 if constants.INIC_MAC in params:
2292 macaddr = params[constants.INIC_MAC]
2293 if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2294 macaddr = utils.NormalizeAndValidateMac(macaddr)
2295
2296 if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
2297 raise errors.OpPrereqError("'auto' is not a valid MAC address when"
2298 " modifying an existing NIC",
2299 errors.ECODE_INVAL)
2300
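# Illustrative parameter dicts accepted above (sketch; values are examples):
#   add:    {constants.INIC_NETWORK: "net1", constants.INIC_IP: "pool"}
#   modify: {constants.INIC_MAC: "aa:00:00:12:34:56"}
# "auto" is a valid MAC only when adding a NIC, not when modifying one.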
2301 def CheckArguments(self):
2302 if not (self.op.nics or self.op.disks or self.op.disk_template or
2303 self.op.hvparams or self.op.beparams or self.op.os_name or
2304 self.op.offline is not None or self.op.runtime_mem or
2305 self.op.pnode):
2306 raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
2307
2308 if self.op.hvparams:
2309 CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
2310 "hypervisor", "instance", "cluster")
2311
2312 self.op.disks = self._UpgradeDiskNicMods(
2313 "disk", self.op.disks, opcodes.OpInstanceSetParams.TestDiskModifications)
2314 self.op.nics = self._UpgradeDiskNicMods(
2315 "NIC", self.op.nics, opcodes.OpInstanceSetParams.TestNicModifications)
2316
2317 if self.op.disks and self.op.disk_template is not None:
2318 raise errors.OpPrereqError("Disk template conversion and other disk"
2319 " changes not supported at the same time",
2320 errors.ECODE_INVAL)
2321
2322 if (self.op.disk_template and
2323 self.op.disk_template in constants.DTS_INT_MIRROR and
2324 self.op.remote_node is None):
2325 raise errors.OpPrereqError("Changing the disk template to a mirrored"
2326 " one requires specifying a secondary node",
2327 errors.ECODE_INVAL)
2328
2329 # Check NIC modifications
2330 self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
2331 self._VerifyNicModification)
2332
2333 if self.op.pnode:
2334 self.op.pnode = ExpandNodeName(self.cfg, self.op.pnode)
2335
2336 def ExpandNames(self):
2337 self._ExpandAndLockInstance()
2338 self.needed_locks[locking.LEVEL_NODEGROUP] = []
2339 # Can't even acquire node locks in shared mode as upcoming changes in
2340 # Ganeti 2.6 will start to modify the node object on disk conversion
2341 self.needed_locks[locking.LEVEL_NODE] = []
2342 self.needed_locks[locking.LEVEL_NODE_RES] = []
2343 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2344 # Lock the node group in shared mode to look up the ipolicy
2345 self.share_locks[locking.LEVEL_NODEGROUP] = 1
2346
2347 def DeclareLocks(self, level):
2348 if level == locking.LEVEL_NODEGROUP:
2349 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
2350 # Acquire locks for the instance's nodegroups optimistically. Needs
2351 # to be verified in CheckPrereq
2352 self.needed_locks[locking.LEVEL_NODEGROUP] = \
2353 self.cfg.GetInstanceNodeGroups(self.op.instance_name)
2354 elif level == locking.LEVEL_NODE:
2355 self._LockInstancesNodes()
2356 if self.op.disk_template and self.op.remote_node:
2357 self.op.remote_node = ExpandNodeName(self.cfg, self.op.remote_node)
2358 self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node)
2359 elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
2360 # Copy node locks
2361 self.needed_locks[locking.LEVEL_NODE_RES] = \
2362 CopyLockList(self.needed_locks[locking.LEVEL_NODE])
2363
2364 def BuildHooksEnv(self):
2365 """Build hooks env.
2366
2367 This runs on the master, primary and secondaries.
2368
2369 """
2370 args = {}
2371 if constants.BE_MINMEM in self.be_new:
2372 args["minmem"] = self.be_new[constants.BE_MINMEM]
2373 if constants.BE_MAXMEM in self.be_new:
2374 args["maxmem"] = self.be_new[constants.BE_MAXMEM]
2375 if constants.BE_VCPUS in self.be_new:
2376 args["vcpus"] = self.be_new[constants.BE_VCPUS]
2377 # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
2378 # information at all.
2379
2380 if self._new_nics is not None:
2381 nics = []
2382
2383 for nic in self._new_nics:
2384 n = copy.deepcopy(nic)
2385 nicparams = self.cluster.SimpleFillNIC(n.nicparams)
2386 n.nicparams = nicparams
2387 nics.append(NICToTuple(self, n))
2388
2389 args["nics"] = nics
2390
2391 env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
2392 if self.op.disk_template:
2393 env["NEW_DISK_TEMPLATE"] = self.op.disk_template
2394 if self.op.runtime_mem:
2395 env["RUNTIME_MEMORY"] = self.op.runtime_mem
2396
2397 return env
2398
2399 def BuildHooksNodes(self):
2400 """Build hooks nodes.
2401
2402 """
2403 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2404 return (nl, nl)
2405
2406 def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
2407 old_params, cluster, pnode):
2408
2409 update_params_dict = dict([(key, params[key])
2410 for key in constants.NICS_PARAMETERS
2411 if key in params])
2412
2413 req_link = update_params_dict.get(constants.NIC_LINK, None)
2414 req_mode = update_params_dict.get(constants.NIC_MODE, None)
2415
2416 new_net_uuid = None
2417 new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
2418 if new_net_uuid_or_name:
2419 new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
2420 new_net_obj = self.cfg.GetNetwork(new_net_uuid)
2421
2422 if old_net_uuid:
2423 old_net_obj = self.cfg.GetNetwork(old_net_uuid)
2424
2425 if new_net_uuid:
2426 netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode)
2427 if not netparams:
2428 raise errors.OpPrereqError("No netparams found for the network"
2429 " %s, probably not connected" %
2430 new_net_obj.name, errors.ECODE_INVAL)
2431 new_params = dict(netparams)
2432 else:
2433 new_params = GetUpdatedParams(old_params, update_params_dict)
2434
2435 utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)
2436
2437 new_filled_params = cluster.SimpleFillNIC(new_params)
2438 objects.NIC.CheckParameterSyntax(new_filled_params)
2439
2440 new_mode = new_filled_params[constants.NIC_MODE]
2441 if new_mode == constants.NIC_MODE_BRIDGED:
2442 bridge = new_filled_params[constants.NIC_LINK]
2443 msg = self.rpc.call_bridges_exist(pnode, [bridge]).fail_msg
2444 if msg:
2445 msg = "Error checking bridges on node '%s': %s" % (pnode, msg)
2446 if self.op.force:
2447 self.warn.append(msg)
2448 else:
2449 raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
2450
2451 elif new_mode == constants.NIC_MODE_ROUTED:
2452 ip = params.get(constants.INIC_IP, old_ip)
2453 if ip is None:
2454 raise errors.OpPrereqError("Cannot set the NIC IP address to None"
2455 " on a routed NIC", errors.ECODE_INVAL)
2456
2457 elif new_mode == constants.NIC_MODE_OVS:
2458 # TODO: check OVS link
2459 self.LogInfo("OVS links are currently not checked for correctness")
2460
2461 if constants.INIC_MAC in params:
2462 mac = params[constants.INIC_MAC]
2463 if mac is None:
2464 raise errors.OpPrereqError("Cannot unset the NIC MAC address",
2465 errors.ECODE_INVAL)
2466 elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2467 # otherwise generate the MAC address
2468 params[constants.INIC_MAC] = \
2469 self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2470 else:
2471 # or validate/reserve the current one
2472 try:
2473 self.cfg.ReserveMAC(mac, self.proc.GetECId())
2474 except errors.ReservationError:
2475 raise errors.OpPrereqError("MAC address '%s' already in use"
2476 " in cluster" % mac,
2477 errors.ECODE_NOTUNIQUE)
2478 elif new_net_uuid != old_net_uuid:
2479
2480 def get_net_prefix(net_uuid):
2481 mac_prefix = None
2482 if net_uuid:
2483 nobj = self.cfg.GetNetwork(net_uuid)
2484 mac_prefix = nobj.mac_prefix
2485
2486 return mac_prefix
2487
2488 new_prefix = get_net_prefix(new_net_uuid)
2489 old_prefix = get_net_prefix(old_net_uuid)
2490 if old_prefix != new_prefix:
2491 params[constants.INIC_MAC] = \
2492 self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2493
2494 # if there is a change in (ip, network) tuple
2495 new_ip = params.get(constants.INIC_IP, old_ip)
2496 if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
2497 if new_ip:
2498 # if IP is pool then require a network and generate one IP
2499 if new_ip.lower() == constants.NIC_IP_POOL:
2500 if new_net_uuid:
2501 try:
2502 new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
2503 except errors.ReservationError:
2504 raise errors.OpPrereqError("Unable to get a free IP"
2505 " from the address pool",
2506 errors.ECODE_STATE)
2507 self.LogInfo("Chose IP %s from network %s",
2508 new_ip,
2509 new_net_obj.name)
2510 params[constants.INIC_IP] = new_ip
2511 else:
2512 raise errors.OpPrereqError("ip=pool, but no network found",
2513 errors.ECODE_INVAL)
2514 # Reserve the new IP in the new network, if any
2515 elif new_net_uuid:
2516 try:
2517 self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId())
2518 self.LogInfo("Reserving IP %s in network %s",
2519 new_ip, new_net_obj.name)
2520 except errors.ReservationError:
2521 raise errors.OpPrereqError("IP %s not available in network %s" %
2522 (new_ip, new_net_obj.name),
2523 errors.ECODE_NOTUNIQUE)
2524 # new network is None, so check whether the new IP conflicts with an existing one
2525 elif self.op.conflicts_check:
2526 _CheckForConflictingIp(self, new_ip, pnode)
2527
2528 # release old IP if old network is not None
2529 if old_ip and old_net_uuid:
2530 try:
2531 self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
2532 except errors.AddressPoolError:
2533 logging.warning("Could not release IP %s: not contained in network %s",
2534 old_ip, old_net_obj.name)
2535
2536 # there are no changes in (ip, network) tuple and old network is not None
2537 elif (old_net_uuid is not None and
2538 (req_link is not None or req_mode is not None)):
2539 raise errors.OpPrereqError("Not allowed to change link or mode of"
2540 " a NIC that is connected to a network",
2541 errors.ECODE_INVAL)
2542
2543 private.params = new_params
2544 private.filled = new_filled_params
2545
2546 def _PreCheckDiskTemplate(self, pnode_info):
2547 """CheckPrereq checks related to a new disk template."""
2548 # Arguments are passed to avoid configuration lookups
2549 instance = self.instance
2550 pnode = instance.primary_node
2551 cluster = self.cluster
2552 if instance.disk_template == self.op.disk_template:
2553 raise errors.OpPrereqError("Instance already has disk template %s" %
2554 instance.disk_template, errors.ECODE_INVAL)
2555
2556 if (instance.disk_template,
2557 self.op.disk_template) not in self._DISK_CONVERSIONS:
2558 raise errors.OpPrereqError("Unsupported disk template conversion from"
2559 " %s to %s" % (instance.disk_template,
2560 self.op.disk_template),
2561 errors.ECODE_INVAL)
2562 CheckInstanceState(self, instance, INSTANCE_DOWN,
2563 msg="cannot change disk template")
2564 if self.op.disk_template in constants.DTS_INT_MIRROR:
2565 if self.op.remote_node == pnode:
2566 raise errors.OpPrereqError("Given new secondary node %s is the same"
2567 " as the primary node of the instance" %
2568 self.op.remote_node, errors.ECODE_STATE)
2569 CheckNodeOnline(self, self.op.remote_node)
2570 CheckNodeNotDrained(self, self.op.remote_node)
2571 # FIXME: here we assume that the old instance type is DT_PLAIN
2572 assert instance.disk_template == constants.DT_PLAIN
2573 disks = [{constants.IDISK_SIZE: d.size,
2574 constants.IDISK_VG: d.logical_id[0]}
2575 for d in instance.disks]
2576 required = ComputeDiskSizePerVG(self.op.disk_template, disks)
2577 CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)
2578
2579 snode_info = self.cfg.GetNodeInfo(self.op.remote_node)
2580 snode_group = self.cfg.GetNodeGroup(snode_info.group)
2581 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
2582 snode_group)
2583 CheckTargetNodeIPolicy(self, ipolicy, instance, snode_info, self.cfg,
2584 ignore=self.op.ignore_ipolicy)
2585 if pnode_info.group != snode_info.group:
2586 self.LogWarning("The primary and secondary nodes are in two"
2587 " different node groups; the disk parameters"
2588 " from the first disk's node group will be"
2589 " used")
2590
2591 if self.op.disk_template not in constants.DTS_EXCL_STORAGE:
2592 # Make sure none of the nodes require exclusive storage
2593 nodes = [pnode_info]
2594 if self.op.disk_template in constants.DTS_INT_MIRROR:
2595 assert snode_info
2596 nodes.append(snode_info)
2597 has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
2598 if compat.any(map(has_es, nodes)):
2599 errmsg = ("Cannot convert disk template from %s to %s when exclusive"
2600 " storage is enabled" % (instance.disk_template,
2601 self.op.disk_template))
2602 raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)
2603
2604 def CheckPrereq(self):
2605 """Check prerequisites.
2606
2607 This checks the requested modifications against the instance's current state.
2608
2609 """
2610 assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
2611 instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2612
2613 cluster = self.cluster = self.cfg.GetClusterInfo()
2614 assert self.instance is not None, \
2615 "Cannot retrieve locked instance %s" % self.op.instance_name
2616
2617 pnode = instance.primary_node
2618
2619 self.warn = []
2620
2621 if (self.op.pnode is not None and self.op.pnode != pnode and
2622 not self.op.force):
2623 # verify that the instance is not up
2624 instance_info = self.rpc.call_instance_info(pnode, instance.name,
2625 instance.hypervisor)
2626 if instance_info.fail_msg:
2627 self.warn.append("Can't get instance runtime information: %s" %
2628 instance_info.fail_msg)
2629 elif instance_info.payload:
2630 raise errors.OpPrereqError("Instance is still running on %s" % pnode,
2631 errors.ECODE_STATE)
2632
2633 assert pnode in self.owned_locks(locking.LEVEL_NODE)
2634 nodelist = list(instance.all_nodes)
2635 pnode_info = self.cfg.GetNodeInfo(pnode)
2636 self.diskparams = self.cfg.GetInstanceDiskParams(instance)
2637
2638 #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
2639 assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
2640 group_info = self.cfg.GetNodeGroup(pnode_info.group)
2641
2642 # dictionary with instance information after the modification
2643 ispec = {}
2644
2645 # Check disk modifications. This is done here and not in CheckArguments
2646 # (as with NICs), because we need to know the instance's disk template
2647 if instance.disk_template == constants.DT_EXT:
2648 self._CheckMods("disk", self.op.disks, {},
2649 self._VerifyDiskModification)
2650 else:
2651 self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
2652 self._VerifyDiskModification)
2653
2654 # Prepare disk/NIC modifications
2655 self.diskmod = _PrepareContainerMods(self.op.disks, None)
2656 self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)
2657
2658 # Check the validity of the `provider' parameter
2659 if instance.disk_template == constants.DT_EXT:
2660 for mod in self.diskmod:
2661 ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
2662 if mod[0] == constants.DDM_ADD:
2663 if ext_provider is None:
2664 raise errors.OpPrereqError("Instance template is '%s' and parameter"
2665 " '%s' missing, during disk add" %
2666 (constants.DT_EXT,
2667 constants.IDISK_PROVIDER),
2668 errors.ECODE_NOENT)
2669 elif mod[0] == constants.DDM_MODIFY:
2670 if ext_provider:
2671 raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
2672 " modification" %
2673 constants.IDISK_PROVIDER,
2674 errors.ECODE_INVAL)
2675 else:
2676 for mod in self.diskmod:
2677 ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
2678 if ext_provider is not None:
2679 raise errors.OpPrereqError("Parameter '%s' is only valid for"
2680 " instances of type '%s'" %
2681 (constants.IDISK_PROVIDER,
2682 constants.DT_EXT),
2683 errors.ECODE_INVAL)
2684
2685 # OS change
2686 if self.op.os_name and not self.op.force:
2687 CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
2688 self.op.force_variant)
2689 instance_os = self.op.os_name
2690 else:
2691 instance_os = instance.os
2692
2693 assert not (self.op.disk_template and self.op.disks), \
2694 "Can't modify disk template and apply disk changes at the same time"
2695
2696 if self.op.disk_template:
2697 self._PreCheckDiskTemplate(pnode_info)
2698
2699 # hvparams processing
2700 if self.op.hvparams:
2701 hv_type = instance.hypervisor
2702 i_hvdict = GetUpdatedParams(instance.hvparams, self.op.hvparams)
2703 utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
2704 hv_new = cluster.SimpleFillHV(hv_type, instance.os, i_hvdict)
2705
2706 # local check
2707 hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
2708 CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
2709 self.hv_proposed = self.hv_new = hv_new # the new actual values
2710 self.hv_inst = i_hvdict # the new dict (without defaults)
2711 else:
2712 self.hv_proposed = cluster.SimpleFillHV(instance.hypervisor, instance.os,
2713 instance.hvparams)
2714 self.hv_new = self.hv_inst = {}
2715
2716 # beparams processing
2717 if self.op.beparams:
2718 i_bedict = GetUpdatedParams(instance.beparams, self.op.beparams,
2719 use_none=True)
2720 objects.UpgradeBeParams(i_bedict)
2721 utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
2722 be_new = cluster.SimpleFillBE(i_bedict)
2723 self.be_proposed = self.be_new = be_new # the new actual values
2724 self.be_inst = i_bedict # the new dict (without defaults)
2725 else:
2726 self.be_new = self.be_inst = {}
2727 self.be_proposed = cluster.SimpleFillBE(instance.beparams)
2728 be_old = cluster.FillBE(instance)
2729
2730 # CPU param validation -- checking every time a parameter is
2731 # changed to cover all cases where either CPU mask or vcpus have
2732 # changed
2733 if (constants.BE_VCPUS in self.be_proposed and
2734 constants.HV_CPU_MASK in self.hv_proposed):
2735 cpu_list = \
2736 utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
2737 # Verify mask is consistent with number of vCPUs. Can skip this
2738 # test if only 1 entry in the CPU mask, which means same mask
2739 # is applied to all vCPUs.
2740 if (len(cpu_list) > 1 and
2741 len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
2742 raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
2743 " CPU mask [%s]" %
2744 (self.be_proposed[constants.BE_VCPUS],
2745 self.hv_proposed[constants.HV_CPU_MASK]),
2746 errors.ECODE_INVAL)
2747
2748 # Only perform this test if a new CPU mask is given
2749 if constants.HV_CPU_MASK in self.hv_new:
2750 # Calculate the largest CPU number requested
2751 max_requested_cpu = max(map(max, cpu_list))
2752 # Check that all of the instance's nodes have enough physical CPUs to
2753 # satisfy the requested CPU mask
2754 _CheckNodesPhysicalCPUs(self, instance.all_nodes,
2755 max_requested_cpu + 1, instance.hypervisor)
2756
2757 # osparams processing
2758 if self.op.osparams:
2759 i_osdict = GetUpdatedParams(instance.osparams, self.op.osparams)
2760 CheckOSParams(self, True, nodelist, instance_os, i_osdict)
2761 self.os_inst = i_osdict # the new dict (without defaults)
2762 else:
2763 self.os_inst = {}
2764
2765 #TODO(dynmem): do the appropriate check involving MINMEM
2766 if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
2767 be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
2768 mem_check_list = [pnode]
2769 if be_new[constants.BE_AUTO_BALANCE]:
2770 # either we changed auto_balance to yes or it was from before
2771 mem_check_list.extend(instance.secondary_nodes)
2772 instance_info = self.rpc.call_instance_info(pnode, instance.name,
2773 instance.hypervisor)
2774 nodeinfo = self.rpc.call_node_info(mem_check_list, None,
2775 [instance.hypervisor], False)
2776 pninfo = nodeinfo[pnode]
2777 msg = pninfo.fail_msg
2778 if msg:
2779 # Assume the primary node is unreachable and go ahead
2780 self.warn.append("Can't get info from primary node %s: %s" %
2781 (pnode, msg))
2782 else:
2783 (_, _, (pnhvinfo, )) = pninfo.payload
2784 if not isinstance(pnhvinfo.get("memory_free", None), int):
2785 self.warn.append("Node data from primary node %s doesn't contain"
2786 " free memory information" % pnode)
2787 elif instance_info.fail_msg:
2788 self.warn.append("Can't get instance runtime information: %s" %
2789 instance_info.fail_msg)
2790 else:
2791 if instance_info.payload:
2792 current_mem = int(instance_info.payload["memory"])
2793 else:
2794 # Assume instance not running
2795 # (there is a slight race condition here, but it's not very
2796 # probable, and we have no other way to check)
2797 # TODO: Describe race condition
2798 current_mem = 0
2799 #TODO(dynmem): do the appropriate check involving MINMEM
2800 miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
2801 pnhvinfo["memory_free"])
2802 if miss_mem > 0:
2803 raise errors.OpPrereqError("This change will prevent the instance"
2804 " from starting, due to %d MB of memory"
2805 " missing on its primary node" %
2806 miss_mem, errors.ECODE_NORES)
2807
2808 if be_new[constants.BE_AUTO_BALANCE]:
2809 for node, nres in nodeinfo.items():
2810 if node not in instance.secondary_nodes:
2811 continue
2812 nres.Raise("Can't get info from secondary node %s" % node,
2813 prereq=True, ecode=errors.ECODE_STATE)
2814 (_, _, (nhvinfo, )) = nres.payload
2815 if not isinstance(nhvinfo.get("memory_free", None), int):
2816 raise errors.OpPrereqError("Secondary node %s didn't return free"
2817 " memory information" % node,
2818 errors.ECODE_STATE)
2819 #TODO(dynmem): do the appropriate check involving MINMEM
2820 elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
2821 raise errors.OpPrereqError("This change will prevent the instance"
2822 " from failover to its secondary node"
2823 " %s, due to not enough memory" % node,
2824 errors.ECODE_STATE)
2825
2826 if self.op.runtime_mem:
2827 remote_info = self.rpc.call_instance_info(instance.primary_node,
2828 instance.name,
2829 instance.hypervisor)
2830 remote_info.Raise("Error checking node %s" % instance.primary_node)
2831 if not remote_info.payload: # not running already
2832 raise errors.OpPrereqError("Instance %s is not running" %
2833 instance.name, errors.ECODE_STATE)
2834
2835 current_memory = remote_info.payload["memory"]
2836 if (not self.op.force and
2837 (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
2838 self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
2839 raise errors.OpPrereqError("Instance %s must have between %d"
2840 " and %d MB of memory unless --force is"
2841 " given" %
2842 (instance.name,
2843 self.be_proposed[constants.BE_MINMEM],
2844 self.be_proposed[constants.BE_MAXMEM]),
2845 errors.ECODE_INVAL)
2846
2847 delta = self.op.runtime_mem - current_memory
2848 if delta > 0:
2849 CheckNodeFreeMemory(self, instance.primary_node,
2850 "ballooning memory for instance %s" %
2851 instance.name, delta, instance.hypervisor)
2852
2853 if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
2854 raise errors.OpPrereqError("Disk operations not supported for"
2855 " diskless instances", errors.ECODE_INVAL)
2856
2857 def _PrepareNicCreate(_, params, private):
2858 self._PrepareNicModification(params, private, None, None,
2859 {}, cluster, pnode)
2860 return (None, None)
2861
2862 def _PrepareNicMod(_, nic, params, private):
2863 self._PrepareNicModification(params, private, nic.ip, nic.network,
2864 nic.nicparams, cluster, pnode)
2865 return None
2866
2867 def _PrepareNicRemove(_, params, __):
2868 ip = params.ip
2869 net = params.network
2870 if net is not None and ip is not None:
2871 self.cfg.ReleaseIp(net, ip, self.proc.GetECId())
2872
2873 # Verify NIC changes (operating on copy)
2874 nics = instance.nics[:]
2875 _ApplyContainerMods("NIC", nics, None, self.nicmod,
2876 _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
2877 if len(nics) > constants.MAX_NICS:
2878 raise errors.OpPrereqError("Instance has too many network interfaces"
2879 " (%d), cannot add more" % constants.MAX_NICS,
2880 errors.ECODE_STATE)
2881
2882 def _PrepareDiskMod(_, disk, params, __):
2883 disk.name = params.get(constants.IDISK_NAME, None)
2884
2885 # Verify disk changes (operating on a copy)
2886 disks = copy.deepcopy(instance.disks)
2887 _ApplyContainerMods("disk", disks, None, self.diskmod, None,
2888 _PrepareDiskMod, None)
2889 utils.ValidateDeviceNames("disk", disks)
2890 if len(disks) > constants.MAX_DISKS:
2891 raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
2892 " more" % constants.MAX_DISKS,
2893 errors.ECODE_STATE)
2894 disk_sizes = [disk.size for disk in instance.disks]
2895 disk_sizes.extend(params["size"] for (op, idx, params, private) in
2896 self.diskmod if op == constants.DDM_ADD)
2897 ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
2898 ispec[constants.ISPEC_DISK_SIZE] = disk_sizes
2899
2900 if self.op.offline is not None and self.op.offline:
2901 CheckInstanceState(self, instance, CAN_CHANGE_INSTANCE_OFFLINE,
2902 msg="can't change to offline")
2903
2904 # Pre-compute NIC changes (necessary to use result in hooks)
2905 self._nic_chgdesc = []
2906 if self.nicmod:
2907 # Operate on copies as this is still in prereq
2908 nics = [nic.Copy() for nic in instance.nics]
2909 _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
2910 self._CreateNewNic, self._ApplyNicMods, None)
2911 # Verify that NIC names are unique and valid
2912 utils.ValidateDeviceNames("NIC", nics)
2913 self._new_nics = nics
2914 ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
2915 else:
2916 self._new_nics = None
2917 ispec[constants.ISPEC_NIC_COUNT] = len(instance.nics)
2918
2919 if not self.op.ignore_ipolicy:
2920 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
2921 group_info)
2922
2923 # Fill ispec with backend parameters
2924 ispec[constants.ISPEC_SPINDLE_USE] = \
2925 self.be_new.get(constants.BE_SPINDLE_USE, None)
2926 ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
2927 None)
2928
2929 # Copy ispec to verify parameters with min/max values separately
2930 if self.op.disk_template:
2931 new_disk_template = self.op.disk_template
2932 else:
2933 new_disk_template = instance.disk_template
2934 ispec_max = ispec.copy()
2935 ispec_max[constants.ISPEC_MEM_SIZE] = \
2936 self.be_new.get(constants.BE_MAXMEM, None)
2937 res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
2938 new_disk_template)
2939 ispec_min = ispec.copy()
2940 ispec_min[constants.ISPEC_MEM_SIZE] = \
2941 self.be_new.get(constants.BE_MINMEM, None)
2942 res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
2943 new_disk_template)
2944
2945 if (res_max or res_min):
2946 # FIXME: Improve error message by including information about whether
2947 # the upper or lower limit of the parameter fails the ipolicy.
2948 msg = ("Instance allocation to group %s (%s) violates policy: %s" %
2949 (group_info, group_info.name,
2950 utils.CommaJoin(set(res_max + res_min))))
2951 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
2952
2953 def _ConvertPlainToDrbd(self, feedback_fn):
2954 """Converts an instance from plain to drbd.
2955
2956 """
2957 feedback_fn("Converting template to drbd")
2958 instance = self.instance
2959 pnode = instance.primary_node
2960 snode = self.op.remote_node
2961
2962 assert instance.disk_template == constants.DT_PLAIN
2963
2964 # create a fake disk info for _GenerateDiskTemplate
2965 disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
2966 constants.IDISK_VG: d.logical_id[0],
2967 constants.IDISK_NAME: d.name}
2968 for d in instance.disks]
2969 new_disks = GenerateDiskTemplate(self, self.op.disk_template,
2970 instance.name, pnode, [snode],
2971 disk_info, None, None, 0, feedback_fn,
2972 self.diskparams)
2973 anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
2974 self.diskparams)
2975 p_excl_stor = IsExclusiveStorageEnabledNodeName(self.cfg, pnode)
2976 s_excl_stor = IsExclusiveStorageEnabledNodeName(self.cfg, snode)
2977 info = GetInstanceInfoText(instance)
2978 feedback_fn("Creating additional volumes...")
2979 # first, create the missing data and meta devices
2980 for disk in anno_disks:
2981 # unfortunately this is... not too nice
2982 CreateSingleBlockDev(self, pnode, instance, disk.children[1],
2983 info, True, p_excl_stor)
2984 for child in disk.children:
2985 CreateSingleBlockDev(self, snode, instance, child, info, True,
2986 s_excl_stor)
2987 # at this stage, all new LVs have been created, we can rename the
2988 # old ones
2989 feedback_fn("Renaming original volumes...")
2990 rename_list = [(o, n.children[0].logical_id)
2991 for (o, n) in zip(instance.disks, new_disks)]
2992 result = self.rpc.call_blockdev_rename(pnode, rename_list)
2993 result.Raise("Failed to rename original LVs")
2994
2995 feedback_fn("Initializing DRBD devices...")
2996 # all child devices are in place, we can now create the DRBD devices