Add disks_active to configuration
[ganeti-github.git] / lib / cmdlib / instance.py
1 #
2 #
3
4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
5 #
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
10 #
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
15 #
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 # 02110-1301, USA.
20
21
22 """Logical units dealing with instances."""
23
24 import OpenSSL
25 import copy
26 import logging
27 import os
28
29 from ganeti import compat
30 from ganeti import constants
31 from ganeti import errors
32 from ganeti import ht
33 from ganeti import hypervisor
34 from ganeti import locking
35 from ganeti.masterd import iallocator
36 from ganeti import masterd
37 from ganeti import netutils
38 from ganeti import objects
39 from ganeti import opcodes
40 from ganeti import pathutils
41 from ganeti import rpc
42 from ganeti import utils
43
44 from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs
45
46 from ganeti.cmdlib.common import INSTANCE_DOWN, \
47 INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
48 ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
49 LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
50 IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
51 AnnotateDiskParams, GetUpdatedParams, ExpandInstanceName, \
52 ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeName
53 from ganeti.cmdlib.instance_storage import CreateDisks, \
54 CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
55 IsExclusiveStorageEnabledNodeName, CreateSingleBlockDev, ComputeDisks, \
56 CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
57 StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks
58 from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
59 GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
60 NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
61 ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
62 GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
63 CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS
64
65 import ganeti.masterd.instance
66
67
68 #: Type description for changes as returned by L{_ApplyContainerMods}'s
69 #: callbacks
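#: (i.e. an optional list of 2-element items, each pairing a non-empty
#: description string with an arbitrary value)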
70 _TApplyContModsCbChanges = \
71 ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
72 ht.TNonEmptyString,
73 ht.TAny,
74 ])))
75
76
77 def _CheckHostnameSane(lu, name):
78 """Ensures that a given hostname resolves to a 'sane' name.
79
80 The given name is required to be a prefix of the resolved hostname,
81 to prevent accidental mismatches.
82
83 @param lu: the logical unit on behalf of which we're checking
84 @param name: the name we should resolve and check
85 @return: the resolved hostname object
86
87 """
88 hostname = netutils.GetHostname(name=name)
89 if hostname.name != name:
90 lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
91 if not utils.MatchNameComponent(name, [hostname.name]):
92 raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
93 " same as given hostname '%s'") %
94 (hostname.name, name), errors.ECODE_INVAL)
95 return hostname
96
97
98 def _CheckOpportunisticLocking(op):
99 """Generate error if opportunistic locking is not possible.
100
101 """
102 if op.opportunistic_locking and not op.iallocator:
103 raise errors.OpPrereqError("Opportunistic locking is only available in"
104 " combination with an instance allocator",
105 errors.ECODE_INVAL)
106
107
108 def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_whitelist):
109 """Wrapper around IAReqInstanceAlloc.
110
111 @param op: The instance opcode
112 @param disks: The computed disks
113 @param nics: The computed nics
114   @param beparams: The fully filled beparams
115 @param node_whitelist: List of nodes which should appear as online to the
116 allocator (unless the node is already marked offline)
117
118 @returns: A filled L{iallocator.IAReqInstanceAlloc}
119
120 """
121 spindle_use = beparams[constants.BE_SPINDLE_USE]
122 return iallocator.IAReqInstanceAlloc(name=op.instance_name,
123 disk_template=op.disk_template,
124 tags=op.tags,
125 os=op.os_type,
126 vcpus=beparams[constants.BE_VCPUS],
127 memory=beparams[constants.BE_MAXMEM],
128 spindle_use=spindle_use,
129 disks=disks,
130 nics=[n.ToDict() for n in nics],
131 hypervisor=op.hypervisor,
132 node_whitelist=node_whitelist)
133
134
135 def _ComputeFullBeParams(op, cluster):
136 """Computes the full beparams.
137
138 @param op: The instance opcode
139 @param cluster: The cluster config object
140
141 @return: The fully filled beparams
142
143 """
144 default_beparams = cluster.beparams[constants.PP_DEFAULT]
145 for param, value in op.beparams.iteritems():
146 if value == constants.VALUE_AUTO:
147 op.beparams[param] = default_beparams[param]
148 objects.UpgradeBeParams(op.beparams)
149 utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
150 return cluster.SimpleFillBE(op.beparams)
151
152
153 def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
154 """Computes the nics.
155
156 @param op: The instance opcode
157 @param cluster: Cluster configuration object
158 @param default_ip: The default ip to assign
159 @param cfg: An instance of the configuration object
160 @param ec_id: Execution context ID
161
162   @returns: The built up NICs
163
164 """
165 nics = []
166 for nic in op.nics:
167 nic_mode_req = nic.get(constants.INIC_MODE, None)
168 nic_mode = nic_mode_req
169 if nic_mode is None or nic_mode == constants.VALUE_AUTO:
170 nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
171
172 net = nic.get(constants.INIC_NETWORK, None)
173 link = nic.get(constants.NIC_LINK, None)
174 ip = nic.get(constants.INIC_IP, None)
175
176 if net is None or net.lower() == constants.VALUE_NONE:
177 net = None
178 else:
179 if nic_mode_req is not None or link is not None:
180 raise errors.OpPrereqError("If network is given, no mode or link"
181 " is allowed to be passed",
182 errors.ECODE_INVAL)
183
184 # ip validity checks
185 if ip is None or ip.lower() == constants.VALUE_NONE:
186 nic_ip = None
187 elif ip.lower() == constants.VALUE_AUTO:
188 if not op.name_check:
189 raise errors.OpPrereqError("IP address set to auto but name checks"
190 " have been skipped",
191 errors.ECODE_INVAL)
192 nic_ip = default_ip
193 else:
194 # We defer pool operations until later, so that the iallocator has
195       # filled in the instance's node(s)
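      # (the actual allocation from the pool happens in
      # LUInstanceCreate.CheckPrereq, once the primary node is known)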
196 if ip.lower() == constants.NIC_IP_POOL:
197 if net is None:
198 raise errors.OpPrereqError("if ip=pool, parameter network"
199 " must be passed too",
200 errors.ECODE_INVAL)
201
202 elif not netutils.IPAddress.IsValid(ip):
203 raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
204 errors.ECODE_INVAL)
205
206 nic_ip = ip
207
208 # TODO: check the ip address for uniqueness
209 if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
210 raise errors.OpPrereqError("Routed nic mode requires an ip address",
211 errors.ECODE_INVAL)
212
213 # MAC address verification
214 mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
215 if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
216 mac = utils.NormalizeAndValidateMac(mac)
217
218 try:
219 # TODO: We need to factor this out
220 cfg.ReserveMAC(mac, ec_id)
221 except errors.ReservationError:
222 raise errors.OpPrereqError("MAC address %s already in use"
223 " in cluster" % mac,
224 errors.ECODE_NOTUNIQUE)
225
226 # Build nic parameters
227 nicparams = {}
228 if nic_mode_req:
229 nicparams[constants.NIC_MODE] = nic_mode
230 if link:
231 nicparams[constants.NIC_LINK] = link
232
233 check_params = cluster.SimpleFillNIC(nicparams)
234 objects.NIC.CheckParameterSyntax(check_params)
235 net_uuid = cfg.LookupNetwork(net)
236 name = nic.get(constants.INIC_NAME, None)
237 if name is not None and name.lower() == constants.VALUE_NONE:
238 name = None
239 nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
240 network=net_uuid, nicparams=nicparams)
241 nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
242 nics.append(nic_obj)
243
244 return nics
245
246
247 def _CheckForConflictingIp(lu, ip, node):
248 """In case of conflicting IP address raise error.
249
250 @type ip: string
251 @param ip: IP address
252 @type node: string
253 @param node: node name
254
255 """
256 (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node)
257 if conf_net is not None:
258 raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
259 " network %s, but the target NIC does not." %
260 (ip, conf_net)),
261 errors.ECODE_STATE)
262
263 return (None, None)
264
265
266 def _ComputeIPolicyInstanceSpecViolation(
267 ipolicy, instance_spec, disk_template,
268 _compute_fn=ComputeIPolicySpecViolation):
269 """Compute if instance specs meets the specs of ipolicy.
270
271 @type ipolicy: dict
272 @param ipolicy: The ipolicy to verify against
273   @type instance_spec: dict
274 @param instance_spec: The instance spec to verify
275 @type disk_template: string
276 @param disk_template: the disk template of the instance
277 @param _compute_fn: The function to verify ipolicy (unittest only)
278 @see: L{ComputeIPolicySpecViolation}
279
280 """
281 mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
282 cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
283 disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
284 disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
285 nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
286 spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)
287
288 return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
289 disk_sizes, spindle_use, disk_template)
290
291
292 def _CheckOSVariant(os_obj, name):
293 """Check whether an OS name conforms to the os variants specification.
294
295 @type os_obj: L{objects.OS}
296 @param os_obj: OS object to check
297 @type name: string
298 @param name: OS name passed by the user, to check for validity
299
300 """
301 variant = objects.OS.GetVariant(name)
302 if not os_obj.supported_variants:
303 if variant:
304 raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
305 " passed)" % (os_obj.name, variant),
306 errors.ECODE_INVAL)
307 return
308 if not variant:
309 raise errors.OpPrereqError("OS name must include a variant",
310 errors.ECODE_INVAL)
311
312 if variant not in os_obj.supported_variants:
313 raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
314
315
316 class LUInstanceCreate(LogicalUnit):
317 """Create an instance.
318
319 """
320 HPATH = "instance-add"
321 HTYPE = constants.HTYPE_INSTANCE
322 REQ_BGL = False
323
324 def CheckArguments(self):
325 """Check arguments.
326
327 """
328 # do not require name_check to ease forward/backward compatibility
329 # for tools
330 if self.op.no_install and self.op.start:
331 self.LogInfo("No-installation mode selected, disabling startup")
332 self.op.start = False
333 # validate/normalize the instance name
334 self.op.instance_name = \
335 netutils.Hostname.GetNormalizedName(self.op.instance_name)
336
337 if self.op.ip_check and not self.op.name_check:
338 # TODO: make the ip check more flexible and not depend on the name check
339 raise errors.OpPrereqError("Cannot do IP address check without a name"
340 " check", errors.ECODE_INVAL)
341
342 # check nics' parameter names
343 for nic in self.op.nics:
344 utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
345       # check that NIC parameter names are unique and valid
346 utils.ValidateDeviceNames("NIC", self.op.nics)
347
348 # check that disk's names are unique and valid
349 utils.ValidateDeviceNames("disk", self.op.disks)
350
351 cluster = self.cfg.GetClusterInfo()
352 if not self.op.disk_template in cluster.enabled_disk_templates:
353 raise errors.OpPrereqError("Cannot create an instance with disk template"
354 " '%s', because it is not enabled in the"
355 " cluster. Enabled disk templates are: %s." %
356 (self.op.disk_template,
357 ",".join(cluster.enabled_disk_templates)))
358
359     # check disks: parameter names and consistent adopt/no-adopt strategy
360 has_adopt = has_no_adopt = False
361 for disk in self.op.disks:
362 if self.op.disk_template != constants.DT_EXT:
363 utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
364 if constants.IDISK_ADOPT in disk:
365 has_adopt = True
366 else:
367 has_no_adopt = True
368 if has_adopt and has_no_adopt:
369 raise errors.OpPrereqError("Either all disks are adopted or none is",
370 errors.ECODE_INVAL)
371 if has_adopt:
372 if self.op.disk_template not in constants.DTS_MAY_ADOPT:
373 raise errors.OpPrereqError("Disk adoption is not supported for the"
374 " '%s' disk template" %
375 self.op.disk_template,
376 errors.ECODE_INVAL)
377 if self.op.iallocator is not None:
378 raise errors.OpPrereqError("Disk adoption not allowed with an"
379 " iallocator script", errors.ECODE_INVAL)
380 if self.op.mode == constants.INSTANCE_IMPORT:
381 raise errors.OpPrereqError("Disk adoption not allowed for"
382 " instance import", errors.ECODE_INVAL)
383 else:
384 if self.op.disk_template in constants.DTS_MUST_ADOPT:
385 raise errors.OpPrereqError("Disk template %s requires disk adoption,"
386 " but no 'adopt' parameter given" %
387 self.op.disk_template,
388 errors.ECODE_INVAL)
389
390 self.adopt_disks = has_adopt
391
392 # instance name verification
393 if self.op.name_check:
394 self.hostname1 = _CheckHostnameSane(self, self.op.instance_name)
395 self.op.instance_name = self.hostname1.name
396 # used in CheckPrereq for ip ping check
397 self.check_ip = self.hostname1.ip
398 else:
399 self.check_ip = None
400
401 # file storage checks
402 if (self.op.file_driver and
403 not self.op.file_driver in constants.FILE_DRIVER):
404 raise errors.OpPrereqError("Invalid file driver name '%s'" %
405 self.op.file_driver, errors.ECODE_INVAL)
406
407 if self.op.disk_template == constants.DT_FILE:
408 opcodes.RequireFileStorage()
409 elif self.op.disk_template == constants.DT_SHARED_FILE:
410 opcodes.RequireSharedFileStorage()
411
412 ### Node/iallocator related checks
413 CheckIAllocatorOrNode(self, "iallocator", "pnode")
414
415 if self.op.pnode is not None:
416 if self.op.disk_template in constants.DTS_INT_MIRROR:
417 if self.op.snode is None:
418 raise errors.OpPrereqError("The networked disk templates need"
419 " a mirror node", errors.ECODE_INVAL)
420 elif self.op.snode:
421 self.LogWarning("Secondary node will be ignored on non-mirrored disk"
422 " template")
423 self.op.snode = None
424
425 _CheckOpportunisticLocking(self.op)
426
427 self._cds = GetClusterDomainSecret()
428
429 if self.op.mode == constants.INSTANCE_IMPORT:
430 # On import force_variant must be True, because if we forced it at
431 # initial install, our only chance when importing it back is that it
432 # works again!
433 self.op.force_variant = True
434
435 if self.op.no_install:
436 self.LogInfo("No-installation mode has no effect during import")
437
438 elif self.op.mode == constants.INSTANCE_CREATE:
439 if self.op.os_type is None:
440 raise errors.OpPrereqError("No guest OS specified",
441 errors.ECODE_INVAL)
442 if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
443 raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
444 " installation" % self.op.os_type,
445 errors.ECODE_STATE)
446 if self.op.disk_template is None:
447 raise errors.OpPrereqError("No disk template specified",
448 errors.ECODE_INVAL)
449
450 elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
451 # Check handshake to ensure both clusters have the same domain secret
452 src_handshake = self.op.source_handshake
453 if not src_handshake:
454 raise errors.OpPrereqError("Missing source handshake",
455 errors.ECODE_INVAL)
456
457 errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
458 src_handshake)
459 if errmsg:
460 raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
461 errors.ECODE_INVAL)
462
463 # Load and check source CA
464 self.source_x509_ca_pem = self.op.source_x509_ca
465 if not self.source_x509_ca_pem:
466 raise errors.OpPrereqError("Missing source X509 CA",
467 errors.ECODE_INVAL)
468
469 try:
470 (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
471 self._cds)
472 except OpenSSL.crypto.Error, err:
473 raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
474 (err, ), errors.ECODE_INVAL)
475
476 (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
477 if errcode is not None:
478 raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
479 errors.ECODE_INVAL)
480
481 self.source_x509_ca = cert
482
483 src_instance_name = self.op.source_instance_name
484 if not src_instance_name:
485 raise errors.OpPrereqError("Missing source instance name",
486 errors.ECODE_INVAL)
487
488 self.source_instance_name = \
489 netutils.GetHostname(name=src_instance_name).name
490
491 else:
492 raise errors.OpPrereqError("Invalid instance creation mode %r" %
493 self.op.mode, errors.ECODE_INVAL)
494
495 def ExpandNames(self):
496 """ExpandNames for CreateInstance.
497
498 Figure out the right locks for instance creation.
499
500 """
501 self.needed_locks = {}
502
503 instance_name = self.op.instance_name
504 # this is just a preventive check, but someone might still add this
505 # instance in the meantime, and creation will fail at lock-add time
506 if instance_name in self.cfg.GetInstanceList():
507 raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
508 instance_name, errors.ECODE_EXISTS)
509
510 self.add_locks[locking.LEVEL_INSTANCE] = instance_name
511
512 if self.op.iallocator:
513 # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
514 # specifying a group on instance creation and then selecting nodes from
515 # that group
516 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
517 self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
518
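      # With opportunistic locking, only the nodes whose locks could actually
      # be acquired are later offered to the allocator (see _RunAllocator).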
519 if self.op.opportunistic_locking:
520 self.opportunistic_locks[locking.LEVEL_NODE] = True
521 self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
522 else:
523 self.op.pnode = ExpandNodeName(self.cfg, self.op.pnode)
524 nodelist = [self.op.pnode]
525 if self.op.snode is not None:
526 self.op.snode = ExpandNodeName(self.cfg, self.op.snode)
527 nodelist.append(self.op.snode)
528 self.needed_locks[locking.LEVEL_NODE] = nodelist
529
530 # in case of import lock the source node too
531 if self.op.mode == constants.INSTANCE_IMPORT:
532 src_node = self.op.src_node
533 src_path = self.op.src_path
534
535 if src_path is None:
536 self.op.src_path = src_path = self.op.instance_name
537
538 if src_node is None:
539 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
540 self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
541 self.op.src_node = None
542 if os.path.isabs(src_path):
543 raise errors.OpPrereqError("Importing an instance from a path"
544 " requires a source node option",
545 errors.ECODE_INVAL)
546 else:
547 self.op.src_node = src_node = ExpandNodeName(self.cfg, src_node)
548 if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
549 self.needed_locks[locking.LEVEL_NODE].append(src_node)
550 if not os.path.isabs(src_path):
551 self.op.src_path = src_path = \
552 utils.PathJoin(pathutils.EXPORT_DIR, src_path)
553
554 self.needed_locks[locking.LEVEL_NODE_RES] = \
555 CopyLockList(self.needed_locks[locking.LEVEL_NODE])
556
557 def _RunAllocator(self):
558 """Run the allocator based on input opcode.
559
560 """
561 if self.op.opportunistic_locking:
562 # Only consider nodes for which a lock is held
563 node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
564 else:
565 node_whitelist = None
566
567     # TODO: Export network to iallocator so that it chooses a pnode
568     #       in a nodegroup that has the desired network connected to it
569 req = _CreateInstanceAllocRequest(self.op, self.disks,
570 self.nics, self.be_full,
571 node_whitelist)
572 ial = iallocator.IAllocator(self.cfg, self.rpc, req)
573
574 ial.Run(self.op.iallocator)
575
576 if not ial.success:
577 # When opportunistic locks are used only a temporary failure is generated
578 if self.op.opportunistic_locking:
579 ecode = errors.ECODE_TEMP_NORES
580 else:
581 ecode = errors.ECODE_NORES
582
583 raise errors.OpPrereqError("Can't compute nodes using"
584 " iallocator '%s': %s" %
585 (self.op.iallocator, ial.info),
586 ecode)
587
588 self.op.pnode = ial.result[0]
589 self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
590 self.op.instance_name, self.op.iallocator,
591 utils.CommaJoin(ial.result))
592
593 assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"
594
595 if req.RequiredNodes() == 2:
596 self.op.snode = ial.result[1]
597
598 def BuildHooksEnv(self):
599 """Build hooks env.
600
601 This runs on master, primary and secondary nodes of the instance.
602
603 """
604 env = {
605 "ADD_MODE": self.op.mode,
606 }
607 if self.op.mode == constants.INSTANCE_IMPORT:
608 env["SRC_NODE"] = self.op.src_node
609 env["SRC_PATH"] = self.op.src_path
610 env["SRC_IMAGES"] = self.src_images
611
612 env.update(BuildInstanceHookEnv(
613 name=self.op.instance_name,
614 primary_node=self.op.pnode,
615 secondary_nodes=self.secondaries,
616 status=self.op.start,
617 os_type=self.op.os_type,
618 minmem=self.be_full[constants.BE_MINMEM],
619 maxmem=self.be_full[constants.BE_MAXMEM],
620 vcpus=self.be_full[constants.BE_VCPUS],
621 nics=NICListToTuple(self, self.nics),
622 disk_template=self.op.disk_template,
623 disks=[(d[constants.IDISK_NAME], d[constants.IDISK_SIZE],
624 d[constants.IDISK_MODE]) for d in self.disks],
625 bep=self.be_full,
626 hvp=self.hv_full,
627 hypervisor_name=self.op.hypervisor,
628 tags=self.op.tags,
629 ))
630
631 return env
632
633 def BuildHooksNodes(self):
634 """Build hooks nodes.
635
636 """
637 nl = [self.cfg.GetMasterNode(), self.op.pnode] + self.secondaries
638 return nl, nl
639
640 def _ReadExportInfo(self):
641 """Reads the export information from disk.
642
643 It will override the opcode source node and path with the actual
644 information, if these two were not specified before.
645
646 @return: the export information
647
648 """
649 assert self.op.mode == constants.INSTANCE_IMPORT
650
651 src_node = self.op.src_node
652 src_path = self.op.src_path
653
654 if src_node is None:
655 locked_nodes = self.owned_locks(locking.LEVEL_NODE)
656 exp_list = self.rpc.call_export_list(locked_nodes)
657 found = False
658 for node in exp_list:
659 if exp_list[node].fail_msg:
660 continue
661 if src_path in exp_list[node].payload:
662 found = True
663 self.op.src_node = src_node = node
664 self.op.src_path = src_path = utils.PathJoin(pathutils.EXPORT_DIR,
665 src_path)
666 break
667 if not found:
668 raise errors.OpPrereqError("No export found for relative path %s" %
669 src_path, errors.ECODE_INVAL)
670
671 CheckNodeOnline(self, src_node)
672 result = self.rpc.call_export_info(src_node, src_path)
673 result.Raise("No export or invalid export found in dir %s" % src_path)
674
675 export_info = objects.SerializableConfigParser.Loads(str(result.payload))
676 if not export_info.has_section(constants.INISECT_EXP):
677 raise errors.ProgrammerError("Corrupted export config",
678 errors.ECODE_ENVIRON)
679
680 ei_version = export_info.get(constants.INISECT_EXP, "version")
681 if (int(ei_version) != constants.EXPORT_VERSION):
682 raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
683 (ei_version, constants.EXPORT_VERSION),
684 errors.ECODE_ENVIRON)
685 return export_info
686
687 def _ReadExportParams(self, einfo):
688 """Use export parameters as defaults.
689
690     In case the opcode doesn't specify (i.e. override) some instance
691     parameters, try to take them from the export information, if it
692     declares them.
693
694 """
695 self.op.os_type = einfo.get(constants.INISECT_EXP, "os")
696
697 if self.op.disk_template is None:
698 if einfo.has_option(constants.INISECT_INS, "disk_template"):
699 self.op.disk_template = einfo.get(constants.INISECT_INS,
700 "disk_template")
701 if self.op.disk_template not in constants.DISK_TEMPLATES:
702 raise errors.OpPrereqError("Disk template specified in configuration"
703 " file is not one of the allowed values:"
704 " %s" %
705 " ".join(constants.DISK_TEMPLATES),
706 errors.ECODE_INVAL)
707 else:
708 raise errors.OpPrereqError("No disk template specified and the export"
709 " is missing the disk_template information",
710 errors.ECODE_INVAL)
711
712 if not self.op.disks:
713 disks = []
714 # TODO: import the disk iv_name too
715 for idx in range(constants.MAX_DISKS):
716 if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
717 disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
718 disks.append({constants.IDISK_SIZE: disk_sz})
719 self.op.disks = disks
720 if not disks and self.op.disk_template != constants.DT_DISKLESS:
721 raise errors.OpPrereqError("No disk info specified and the export"
722 " is missing the disk information",
723 errors.ECODE_INVAL)
724
725 if not self.op.nics:
726 nics = []
727 for idx in range(constants.MAX_NICS):
728 if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
729 ndict = {}
730 for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
731 v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
732 ndict[name] = v
733 nics.append(ndict)
734 else:
735 break
736 self.op.nics = nics
737
738 if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
739 self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()
740
741 if (self.op.hypervisor is None and
742 einfo.has_option(constants.INISECT_INS, "hypervisor")):
743 self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")
744
745 if einfo.has_section(constants.INISECT_HYP):
746 # use the export parameters but do not override the ones
747 # specified by the user
748 for name, value in einfo.items(constants.INISECT_HYP):
749 if name not in self.op.hvparams:
750 self.op.hvparams[name] = value
751
752 if einfo.has_section(constants.INISECT_BEP):
753 # use the parameters, without overriding
754 for name, value in einfo.items(constants.INISECT_BEP):
755 if name not in self.op.beparams:
756 self.op.beparams[name] = value
757 # Compatibility for the old "memory" be param
758 if name == constants.BE_MEMORY:
759 if constants.BE_MAXMEM not in self.op.beparams:
760 self.op.beparams[constants.BE_MAXMEM] = value
761 if constants.BE_MINMEM not in self.op.beparams:
762 self.op.beparams[constants.BE_MINMEM] = value
763 else:
764 # try to read the parameters old style, from the main section
765 for name in constants.BES_PARAMETERS:
766 if (name not in self.op.beparams and
767 einfo.has_option(constants.INISECT_INS, name)):
768 self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)
769
770 if einfo.has_section(constants.INISECT_OSP):
771 # use the parameters, without overriding
772 for name, value in einfo.items(constants.INISECT_OSP):
773 if name not in self.op.osparams:
774 self.op.osparams[name] = value
775
776 def _RevertToDefaults(self, cluster):
777 """Revert the instance parameters to the default values.
778
779 """
780 # hvparams
781 hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
782 for name in self.op.hvparams.keys():
783 if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
784 del self.op.hvparams[name]
785 # beparams
786 be_defs = cluster.SimpleFillBE({})
787 for name in self.op.beparams.keys():
788 if name in be_defs and be_defs[name] == self.op.beparams[name]:
789 del self.op.beparams[name]
790 # nic params
791 nic_defs = cluster.SimpleFillNIC({})
792 for nic in self.op.nics:
793 for name in constants.NICS_PARAMETERS:
794 if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
795 del nic[name]
796 # osparams
797 os_defs = cluster.SimpleFillOS(self.op.os_type, {})
798 for name in self.op.osparams.keys():
799 if name in os_defs and os_defs[name] == self.op.osparams[name]:
800 del self.op.osparams[name]
801
802 def _CalculateFileStorageDir(self):
803 """Calculate final instance file storage dir.
804
805 """
806 # file storage dir calculation/check
807 self.instance_file_storage_dir = None
808 if self.op.disk_template in constants.DTS_FILEBASED:
809 # build the full file storage dir path
810 joinargs = []
811
812 if self.op.disk_template == constants.DT_SHARED_FILE:
813 get_fsd_fn = self.cfg.GetSharedFileStorageDir
814 else:
815 get_fsd_fn = self.cfg.GetFileStorageDir
816
817 cfg_storagedir = get_fsd_fn()
818 if not cfg_storagedir:
819 raise errors.OpPrereqError("Cluster file storage dir not defined",
820 errors.ECODE_STATE)
821 joinargs.append(cfg_storagedir)
822
823 if self.op.file_storage_dir is not None:
824 joinargs.append(self.op.file_storage_dir)
825
826 joinargs.append(self.op.instance_name)
827
828 # pylint: disable=W0142
829 self.instance_file_storage_dir = utils.PathJoin(*joinargs)
830
831 def CheckPrereq(self): # pylint: disable=R0914
832 """Check prerequisites.
833
834 """
835 self._CalculateFileStorageDir()
836
837 if self.op.mode == constants.INSTANCE_IMPORT:
838 export_info = self._ReadExportInfo()
839 self._ReadExportParams(export_info)
840 self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
841 else:
842 self._old_instance_name = None
843
844 if (not self.cfg.GetVGName() and
845 self.op.disk_template not in constants.DTS_NOT_LVM):
846 raise errors.OpPrereqError("Cluster does not support lvm-based"
847 " instances", errors.ECODE_STATE)
848
849 if (self.op.hypervisor is None or
850 self.op.hypervisor == constants.VALUE_AUTO):
851 self.op.hypervisor = self.cfg.GetHypervisorType()
852
853 cluster = self.cfg.GetClusterInfo()
854 enabled_hvs = cluster.enabled_hypervisors
855 if self.op.hypervisor not in enabled_hvs:
856 raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
857 " cluster (%s)" %
858 (self.op.hypervisor, ",".join(enabled_hvs)),
859 errors.ECODE_STATE)
860
861 # Check tag validity
862 for tag in self.op.tags:
863 objects.TaggableObject.ValidateTag(tag)
864
865 # check hypervisor parameter syntax (locally)
866 utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
867 filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
868 self.op.hvparams)
869 hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
870 hv_type.CheckParameterSyntax(filled_hvp)
871 self.hv_full = filled_hvp
872 # check that we don't specify global parameters on an instance
873 CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
874 "instance", "cluster")
875
876 # fill and remember the beparams dict
877 self.be_full = _ComputeFullBeParams(self.op, cluster)
878
879 # build os parameters
880 self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)
881
882 # now that hvp/bep are in final format, let's reset to defaults,
883 # if told to do so
884 if self.op.identify_defaults:
885 self._RevertToDefaults(cluster)
886
887 # NIC buildup
888 self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
889 self.proc.GetECId())
890
891 # disk checks/pre-build
892 default_vg = self.cfg.GetVGName()
893 self.disks = ComputeDisks(self.op, default_vg)
894
895 if self.op.mode == constants.INSTANCE_IMPORT:
896 disk_images = []
897 for idx in range(len(self.disks)):
898 option = "disk%d_dump" % idx
899 if export_info.has_option(constants.INISECT_INS, option):
900 # FIXME: are the old os-es, disk sizes, etc. useful?
901 export_name = export_info.get(constants.INISECT_INS, option)
902 image = utils.PathJoin(self.op.src_path, export_name)
903 disk_images.append(image)
904 else:
905 disk_images.append(False)
906
907 self.src_images = disk_images
908
909 if self.op.instance_name == self._old_instance_name:
910 for idx, nic in enumerate(self.nics):
911 if nic.mac == constants.VALUE_AUTO:
912 nic_mac_ini = "nic%d_mac" % idx
913 nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
914
915 # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
916
917 # ip ping checks (we use the same ip that was resolved in ExpandNames)
918 if self.op.ip_check:
919 if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
920 raise errors.OpPrereqError("IP %s of instance %s already in use" %
921 (self.check_ip, self.op.instance_name),
922 errors.ECODE_NOTUNIQUE)
923
924 #### mac address generation
925 # By generating here the mac address both the allocator and the hooks get
926 # the real final mac address rather than the 'auto' or 'generate' value.
927 # There is a race condition between the generation and the instance object
928 # creation, which means that we know the mac is valid now, but we're not
929 # sure it will be when we actually add the instance. If things go bad
930 # adding the instance will abort because of a duplicate mac, and the
931 # creation job will fail.
932 for nic in self.nics:
933 if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
934 nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())
935
936 #### allocator run
937
938 if self.op.iallocator is not None:
939 self._RunAllocator()
940
941 # Release all unneeded node locks
942 keep_locks = filter(None, [self.op.pnode, self.op.snode, self.op.src_node])
943 ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
944 ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
945 ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
946
947 assert (self.owned_locks(locking.LEVEL_NODE) ==
948 self.owned_locks(locking.LEVEL_NODE_RES)), \
949 "Node locks differ from node resource locks"
950
951 #### node related checks
952
953 # check primary node
954 self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
955 assert self.pnode is not None, \
956 "Cannot retrieve locked node %s" % self.op.pnode
957 if pnode.offline:
958 raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
959 pnode.name, errors.ECODE_STATE)
960 if pnode.drained:
961 raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
962 pnode.name, errors.ECODE_STATE)
963 if not pnode.vm_capable:
964 raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
965 " '%s'" % pnode.name, errors.ECODE_STATE)
966
967 self.secondaries = []
968
969 # Fill in any IPs from IP pools. This must happen here, because we need to
970 # know the nic's primary node, as specified by the iallocator
971 for idx, nic in enumerate(self.nics):
972 net_uuid = nic.network
973 if net_uuid is not None:
974 nobj = self.cfg.GetNetwork(net_uuid)
975 netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.name)
976 if netparams is None:
977 raise errors.OpPrereqError("No netparams found for network"
978 " %s. Propably not connected to"
979 " node's %s nodegroup" %
980 (nobj.name, self.pnode.name),
981 errors.ECODE_INVAL)
982 self.LogInfo("NIC/%d inherits netparams %s" %
983 (idx, netparams.values()))
984 nic.nicparams = dict(netparams)
985 if nic.ip is not None:
986 if nic.ip.lower() == constants.NIC_IP_POOL:
987 try:
988 nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
989 except errors.ReservationError:
990 raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
991 " from the address pool" % idx,
992 errors.ECODE_STATE)
993 self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
994 else:
995 try:
996 self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId())
997 except errors.ReservationError:
998 raise errors.OpPrereqError("IP address %s already in use"
999 " or does not belong to network %s" %
1000 (nic.ip, nobj.name),
1001 errors.ECODE_NOTUNIQUE)
1002
1003 # net is None, ip None or given
1004 elif self.op.conflicts_check:
1005 _CheckForConflictingIp(self, nic.ip, self.pnode.name)
1006
1007 # mirror node verification
1008 if self.op.disk_template in constants.DTS_INT_MIRROR:
1009 if self.op.snode == pnode.name:
1010 raise errors.OpPrereqError("The secondary node cannot be the"
1011 " primary node", errors.ECODE_INVAL)
1012 CheckNodeOnline(self, self.op.snode)
1013 CheckNodeNotDrained(self, self.op.snode)
1014 CheckNodeVmCapable(self, self.op.snode)
1015 self.secondaries.append(self.op.snode)
1016
1017 snode = self.cfg.GetNodeInfo(self.op.snode)
1018 if pnode.group != snode.group:
1019 self.LogWarning("The primary and secondary nodes are in two"
1020 " different node groups; the disk parameters"
1021 " from the first disk's node group will be"
1022 " used")
1023
1024 if not self.op.disk_template in constants.DTS_EXCL_STORAGE:
1025 nodes = [pnode]
1026 if self.op.disk_template in constants.DTS_INT_MIRROR:
1027 nodes.append(snode)
1028 has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
1029 if compat.any(map(has_es, nodes)):
1030 raise errors.OpPrereqError("Disk template %s not supported with"
1031 " exclusive storage" % self.op.disk_template,
1032 errors.ECODE_STATE)
1033
1034 nodenames = [pnode.name] + self.secondaries
1035
1036 if not self.adopt_disks:
1037 if self.op.disk_template == constants.DT_RBD:
1038 # _CheckRADOSFreeSpace() is just a placeholder.
1039 # Any function that checks prerequisites can be placed here.
1040 # Check if there is enough space on the RADOS cluster.
1041 CheckRADOSFreeSpace()
1042 elif self.op.disk_template == constants.DT_EXT:
1043 # FIXME: Function that checks prereqs if needed
1044 pass
1045 else:
1046 # Check lv size requirements, if not adopting
1047 req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
1048 CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)
1049
1050 elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
1051 all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
1052 disk[constants.IDISK_ADOPT])
1053 for disk in self.disks])
1054 if len(all_lvs) != len(self.disks):
1055 raise errors.OpPrereqError("Duplicate volume names given for adoption",
1056 errors.ECODE_INVAL)
1057 for lv_name in all_lvs:
1058 try:
1059           # FIXME: lv_name here is "vg/lv"; need to ensure that other calls
1060           # to ReserveLV use the same syntax
1061 self.cfg.ReserveLV(lv_name, self.proc.GetECId())
1062 except errors.ReservationError:
1063 raise errors.OpPrereqError("LV named %s used by another instance" %
1064 lv_name, errors.ECODE_NOTUNIQUE)
1065
1066 vg_names = self.rpc.call_vg_list([pnode.name])[pnode.name]
1067 vg_names.Raise("Cannot get VG information from node %s" % pnode.name)
1068
1069 node_lvs = self.rpc.call_lv_list([pnode.name],
1070 vg_names.payload.keys())[pnode.name]
1071 node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
1072 node_lvs = node_lvs.payload
1073
1074 delta = all_lvs.difference(node_lvs.keys())
1075 if delta:
1076 raise errors.OpPrereqError("Missing logical volume(s): %s" %
1077 utils.CommaJoin(delta),
1078 errors.ECODE_INVAL)
1079 online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
1080 if online_lvs:
1081 raise errors.OpPrereqError("Online logical volumes found, cannot"
1082 " adopt: %s" % utils.CommaJoin(online_lvs),
1083 errors.ECODE_STATE)
1084 # update the size of disk based on what is found
1085 for dsk in self.disks:
1086 dsk[constants.IDISK_SIZE] = \
1087 int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
1088 dsk[constants.IDISK_ADOPT])][0]))
1089
1090 elif self.op.disk_template == constants.DT_BLOCK:
1091 # Normalize and de-duplicate device paths
1092 all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
1093 for disk in self.disks])
1094 if len(all_disks) != len(self.disks):
1095 raise errors.OpPrereqError("Duplicate disk names given for adoption",
1096 errors.ECODE_INVAL)
1097 baddisks = [d for d in all_disks
1098 if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
1099 if baddisks:
1100 raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
1101 " cannot be adopted" %
1102 (utils.CommaJoin(baddisks),
1103 constants.ADOPTABLE_BLOCKDEV_ROOT),
1104 errors.ECODE_INVAL)
1105
1106 node_disks = self.rpc.call_bdev_sizes([pnode.name],
1107 list(all_disks))[pnode.name]
1108 node_disks.Raise("Cannot get block device information from node %s" %
1109 pnode.name)
1110 node_disks = node_disks.payload
1111 delta = all_disks.difference(node_disks.keys())
1112 if delta:
1113 raise errors.OpPrereqError("Missing block device(s): %s" %
1114 utils.CommaJoin(delta),
1115 errors.ECODE_INVAL)
1116 for dsk in self.disks:
1117 dsk[constants.IDISK_SIZE] = \
1118 int(float(node_disks[dsk[constants.IDISK_ADOPT]]))
1119
1120 # Verify instance specs
1121 spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
1122 ispec = {
1123 constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
1124 constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
1125 constants.ISPEC_DISK_COUNT: len(self.disks),
1126 constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
1127 for disk in self.disks],
1128 constants.ISPEC_NIC_COUNT: len(self.nics),
1129 constants.ISPEC_SPINDLE_USE: spindle_use,
1130 }
1131
1132 group_info = self.cfg.GetNodeGroup(pnode.group)
1133 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1134 res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
1135 self.op.disk_template)
1136 if not self.op.ignore_ipolicy and res:
1137 msg = ("Instance allocation to group %s (%s) violates policy: %s" %
1138 (pnode.group, group_info.name, utils.CommaJoin(res)))
1139 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
1140
1141 CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
1142
1143 CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
1144 # check OS parameters (remotely)
1145 CheckOSParams(self, True, nodenames, self.op.os_type, self.os_full)
1146
1147 CheckNicsBridgesExist(self, self.nics, self.pnode.name)
1148
1149 #TODO: _CheckExtParams (remotely)
1150 # Check parameters for extstorage
1151
1152 # memory check on primary node
1153 #TODO(dynmem): use MINMEM for checking
1154 if self.op.start:
1155 CheckNodeFreeMemory(self, self.pnode.name,
1156 "creating instance %s" % self.op.instance_name,
1157 self.be_full[constants.BE_MAXMEM],
1158 self.op.hypervisor)
1159
1160 self.dry_run_result = list(nodenames)
1161
1162 def Exec(self, feedback_fn):
1163 """Create and add the instance to the cluster.
1164
1165 """
1166 instance = self.op.instance_name
1167 pnode_name = self.pnode.name
1168
1169 assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
1170 self.owned_locks(locking.LEVEL_NODE)), \
1171 "Node locks differ from node resource locks"
1172 assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)
1173
1174 ht_kind = self.op.hypervisor
1175 if ht_kind in constants.HTS_REQ_PORT:
1176 network_port = self.cfg.AllocatePort()
1177 else:
1178 network_port = None
1179
1180     # This is ugly, but we have a chicken-and-egg problem here
1181 # We can only take the group disk parameters, as the instance
1182 # has no disks yet (we are generating them right here).
1183 node = self.cfg.GetNodeInfo(pnode_name)
1184 nodegroup = self.cfg.GetNodeGroup(node.group)
1185 disks = GenerateDiskTemplate(self,
1186 self.op.disk_template,
1187 instance, pnode_name,
1188 self.secondaries,
1189 self.disks,
1190 self.instance_file_storage_dir,
1191 self.op.file_driver,
1192 0,
1193 feedback_fn,
1194 self.cfg.GetGroupDiskParams(nodegroup))
1195
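    # Note: disks_active starts out False here; it is only flipped to True
    # further down, after disk creation/adoption and the sync checks succeed.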
1196 iobj = objects.Instance(name=instance, os=self.op.os_type,
1197 primary_node=pnode_name,
1198 nics=self.nics, disks=disks,
1199 disk_template=self.op.disk_template,
1200 disks_active=False,
1201 admin_state=constants.ADMINST_DOWN,
1202 network_port=network_port,
1203 beparams=self.op.beparams,
1204 hvparams=self.op.hvparams,
1205 hypervisor=self.op.hypervisor,
1206 osparams=self.op.osparams,
1207 )
1208
1209 if self.op.tags:
1210 for tag in self.op.tags:
1211 iobj.AddTag(tag)
1212
1213 if self.adopt_disks:
1214 if self.op.disk_template == constants.DT_PLAIN:
1215 # rename LVs to the newly-generated names; we need to construct
1216 # 'fake' LV disks with the old data, plus the new unique_id
1217 tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
1218 rename_to = []
1219 for t_dsk, a_dsk in zip(tmp_disks, self.disks):
1220 rename_to.append(t_dsk.logical_id)
1221 t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
1222 self.cfg.SetDiskID(t_dsk, pnode_name)
1223 result = self.rpc.call_blockdev_rename(pnode_name,
1224 zip(tmp_disks, rename_to))
1225         result.Raise("Failed to rename adopted LVs")
1226 else:
1227 feedback_fn("* creating instance disks...")
1228 try:
1229 CreateDisks(self, iobj)
1230 except errors.OpExecError:
1231 self.LogWarning("Device creation failed")
1232 self.cfg.ReleaseDRBDMinors(instance)
1233 raise
1234
1235 feedback_fn("adding instance %s to cluster config" % instance)
1236
1237 self.cfg.AddInstance(iobj, self.proc.GetECId())
1238
1239 # Declare that we don't want to remove the instance lock anymore, as we've
1240 # added the instance to the config
1241 del self.remove_locks[locking.LEVEL_INSTANCE]
1242
1243 if self.op.mode == constants.INSTANCE_IMPORT:
1244 # Release unused nodes
1245 ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node])
1246 else:
1247 # Release all nodes
1248 ReleaseLocks(self, locking.LEVEL_NODE)
1249
1250 disk_abort = False
1251 if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
1252 feedback_fn("* wiping instance disks...")
1253 try:
1254 WipeDisks(self, iobj)
1255 except errors.OpExecError, err:
1256 logging.exception("Wiping disks failed")
1257 self.LogWarning("Wiping instance disks failed (%s)", err)
1258 disk_abort = True
1259
1260 if disk_abort:
1261 # Something is already wrong with the disks, don't do anything else
1262 pass
1263 elif self.op.wait_for_sync:
1264 disk_abort = not WaitForSync(self, iobj)
1265 elif iobj.disk_template in constants.DTS_INT_MIRROR:
1266 # make sure the disks are not degraded (still sync-ing is ok)
1267 feedback_fn("* checking mirrors status")
1268 disk_abort = not WaitForSync(self, iobj, oneshot=True)
1269 else:
1270 disk_abort = False
1271
1272 if disk_abort:
1273 RemoveDisks(self, iobj)
1274 self.cfg.RemoveInstance(iobj.name)
1275 # Make sure the instance lock gets removed
1276 self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
1277 raise errors.OpExecError("There are some degraded disks for"
1278 " this instance")
1279
1280 # instance disks are now active
1281 iobj.disks_active = True
1282
1283 # Release all node resource locks
1284 ReleaseLocks(self, locking.LEVEL_NODE_RES)
1285
1286 if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
1287       # we need to set the disks' IDs to the primary node, since the
1288       # preceding code might or might not have done it, depending on
1289 # disk template and other options
1290 for disk in iobj.disks:
1291 self.cfg.SetDiskID(disk, pnode_name)
1292 if self.op.mode == constants.INSTANCE_CREATE:
1293 if not self.op.no_install:
1294 pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
1295 not self.op.wait_for_sync)
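        # (pausing resync on internally mirrored templates keeps the initial
        # sync from competing with the OS installation for disk I/O)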
1296 if pause_sync:
1297 feedback_fn("* pausing disk sync to install instance OS")
1298 result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
1299 (iobj.disks,
1300 iobj), True)
1301 for idx, success in enumerate(result.payload):
1302 if not success:
1303 logging.warn("pause-sync of instance %s for disk %d failed",
1304 instance, idx)
1305
1306 feedback_fn("* running the instance OS create scripts...")
1307 # FIXME: pass debug option from opcode to backend
1308 os_add_result = \
1309 self.rpc.call_instance_os_add(pnode_name, (iobj, None), False,
1310 self.op.debug_level)
1311 if pause_sync:
1312 feedback_fn("* resuming disk sync")
1313 result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
1314 (iobj.disks,
1315 iobj), False)
1316 for idx, success in enumerate(result.payload):
1317 if not success:
1318 logging.warn("resume-sync of instance %s for disk %d failed",
1319 instance, idx)
1320
1321 os_add_result.Raise("Could not add os for instance %s"
1322 " on node %s" % (instance, pnode_name))
1323
1324 else:
1325 if self.op.mode == constants.INSTANCE_IMPORT:
1326 feedback_fn("* running the instance OS import scripts...")
1327
1328 transfers = []
1329
1330 for idx, image in enumerate(self.src_images):
1331 if not image:
1332 continue
1333
1334 # FIXME: pass debug option from opcode to backend
1335 dt = masterd.instance.DiskTransfer("disk/%s" % idx,
1336 constants.IEIO_FILE, (image, ),
1337 constants.IEIO_SCRIPT,
1338 (iobj.disks[idx], idx),
1339 None)
1340 transfers.append(dt)
1341
1342 import_result = \
1343 masterd.instance.TransferInstanceData(self, feedback_fn,
1344 self.op.src_node, pnode_name,
1345 self.pnode.secondary_ip,
1346 iobj, transfers)
1347 if not compat.all(import_result):
1348 self.LogWarning("Some disks for instance %s on node %s were not"
1349 " imported successfully" % (instance, pnode_name))
1350
1351 rename_from = self._old_instance_name
1352
1353 elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
1354 feedback_fn("* preparing remote import...")
1355 # The source cluster will stop the instance before attempting to make
1356 # a connection. In some cases stopping an instance can take a long
1357 # time, hence the shutdown timeout is added to the connection
1358 # timeout.
1359 connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
1360 self.op.source_shutdown_timeout)
1361 timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
1362
1363 assert iobj.primary_node == self.pnode.name
1364 disk_results = \
1365 masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
1366 self.source_x509_ca,
1367 self._cds, timeouts)
1368 if not compat.all(disk_results):
1369 # TODO: Should the instance still be started, even if some disks
1370 # failed to import (valid for local imports, too)?
1371 self.LogWarning("Some disks for instance %s on node %s were not"
1372 " imported successfully" % (instance, pnode_name))
1373
1374 rename_from = self.source_instance_name
1375
1376 else:
1377 # also checked in the prereq part
1378 raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
1379 % self.op.mode)
1380
1381 # Run rename script on newly imported instance
1382 assert iobj.name == instance
1383 feedback_fn("Running rename script for %s" % instance)
1384 result = self.rpc.call_instance_run_rename(pnode_name, iobj,
1385 rename_from,
1386 self.op.debug_level)
1387 if result.fail_msg:
1388 self.LogWarning("Failed to run rename script for %s on node"
1389 " %s: %s" % (instance, pnode_name, result.fail_msg))
1390
1391 assert not self.owned_locks(locking.LEVEL_NODE_RES)
1392
1393 if self.op.start:
1394 iobj.admin_state = constants.ADMINST_UP
1395 self.cfg.Update(iobj, feedback_fn)
1396 logging.info("Starting instance %s on node %s", instance, pnode_name)
1397 feedback_fn("* starting instance...")
1398 result = self.rpc.call_instance_start(pnode_name, (iobj, None, None),
1399 False, self.op.reason)
1400 result.Raise("Could not start instance")
1401
1402 return list(iobj.all_nodes)
1403
1404
1405 class LUInstanceRename(LogicalUnit):
1406 """Rename an instance.
1407
1408 """
1409 HPATH = "instance-rename"
1410 HTYPE = constants.HTYPE_INSTANCE
1411
1412 def CheckArguments(self):
1413 """Check arguments.
1414
1415 """
1416 if self.op.ip_check and not self.op.name_check:
1417 # TODO: make the ip check more flexible and not depend on the name check
1418 raise errors.OpPrereqError("IP address check requires a name check",
1419 errors.ECODE_INVAL)
1420
1421 def BuildHooksEnv(self):
1422 """Build hooks env.
1423
1424 This runs on master, primary and secondary nodes of the instance.
1425
1426 """
1427 env = BuildInstanceHookEnvByObject(self, self.instance)
1428 env["INSTANCE_NEW_NAME"] = self.op.new_name
1429 return env
1430
1431 def BuildHooksNodes(self):
1432 """Build hooks nodes.
1433
1434 """
1435 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
1436 return (nl, nl)
1437
1438 def CheckPrereq(self):
1439 """Check prerequisites.
1440
1441 This checks that the instance is in the cluster and is not running.
1442
1443 """
1444 self.op.instance_name = ExpandInstanceName(self.cfg,
1445 self.op.instance_name)
1446 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
1447 assert instance is not None
1448 CheckNodeOnline(self, instance.primary_node)
1449 CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
1450 msg="cannot rename")
1451 self.instance = instance
1452
1453 new_name = self.op.new_name
1454 if self.op.name_check:
1455 hostname = _CheckHostnameSane(self, new_name)
1456 new_name = self.op.new_name = hostname.name
1457 if (self.op.ip_check and
1458 netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
1459 raise errors.OpPrereqError("IP %s of instance %s already in use" %
1460 (hostname.ip, new_name),
1461 errors.ECODE_NOTUNIQUE)
1462
1463 instance_list = self.cfg.GetInstanceList()
1464 if new_name in instance_list and new_name != instance.name:
1465 raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
1466 new_name, errors.ECODE_EXISTS)
1467
1468 def Exec(self, feedback_fn):
1469 """Rename the instance.
1470
1471 """
1472 inst = self.instance
1473 old_name = inst.name
1474
1475 rename_file_storage = False
1476 if (inst.disk_template in constants.DTS_FILEBASED and
1477 self.op.new_name != inst.name):
1478 old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
1479 rename_file_storage = True
1480
1481 self.cfg.RenameInstance(inst.name, self.op.new_name)
1482 # Change the instance lock. This is definitely safe while we hold the BGL.
1483 # Otherwise the new lock would have to be added in acquired mode.
1484 assert self.REQ_BGL
1485 assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
1486 self.glm.remove(locking.LEVEL_INSTANCE, old_name)
1487 self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
1488
1489 # re-read the instance from the configuration after rename
1490 inst = self.cfg.GetInstanceInfo(self.op.new_name)
1491
1492 if rename_file_storage:
1493 new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
1494 result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
1495 old_file_storage_dir,
1496 new_file_storage_dir)
1497 result.Raise("Could not rename on node %s directory '%s' to '%s'"
1498 " (but the instance has been renamed in Ganeti)" %
1499 (inst.primary_node, old_file_storage_dir,
1500 new_file_storage_dir))
1501
1502 StartInstanceDisks(self, inst, None)
1503 # update info on disks
1504 info = GetInstanceInfoText(inst)
1505 for (idx, disk) in enumerate(inst.disks):
1506 for node in inst.all_nodes:
1507 self.cfg.SetDiskID(disk, node)
1508 result = self.rpc.call_blockdev_setinfo(node, disk, info)
1509 if result.fail_msg:
1510 self.LogWarning("Error setting info on node %s for disk %s: %s",
1511 node, idx, result.fail_msg)
1512 try:
1513 result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
1514 old_name, self.op.debug_level)
1515 msg = result.fail_msg
1516 if msg:
1517 msg = ("Could not run OS rename script for instance %s on node %s"
1518 " (but the instance has been renamed in Ganeti): %s" %
1519 (inst.name, inst.primary_node, msg))
1520 self.LogWarning(msg)
1521 finally:
1522 ShutdownInstanceDisks(self, inst)
1523
1524 return inst.name
1525
1526
1527 class LUInstanceRemove(LogicalUnit):
1528 """Remove an instance.
1529
1530 """
1531 HPATH = "instance-remove"
1532 HTYPE = constants.HTYPE_INSTANCE
1533 REQ_BGL = False
1534
1535 def ExpandNames(self):
1536 self._ExpandAndLockInstance()
1537 self.needed_locks[locking.LEVEL_NODE] = []
1538 self.needed_locks[locking.LEVEL_NODE_RES] = []
1539 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
1540
1541 def DeclareLocks(self, level):
1542 if level == locking.LEVEL_NODE:
1543 self._LockInstancesNodes()
1544 elif level == locking.LEVEL_NODE_RES:
1545 # Copy node locks
1546 self.needed_locks[locking.LEVEL_NODE_RES] = \
1547 CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1548
1549 def BuildHooksEnv(self):
1550 """Build hooks env.
1551
1552 This runs on master, primary and secondary nodes of the instance.
1553
1554 """
1555 env = BuildInstanceHookEnvByObject(self, self.instance)
1556 env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
1557 return env
1558
1559 def BuildHooksNodes(self):
1560 """Build hooks nodes.
1561
1562 """
1563 nl = [self.cfg.GetMasterNode()]
1564 nl_post = list(self.instance.all_nodes) + nl
1565 return (nl, nl_post)
1566
1567 def CheckPrereq(self):
1568 """Check prerequisites.
1569
1570 This checks that the instance is in the cluster.
1571
1572 """
1573 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
1574 assert self.instance is not None, \
1575 "Cannot retrieve locked instance %s" % self.op.instance_name
1576
1577 def Exec(self, feedback_fn):
1578 """Remove the instance.
1579
1580 """
1581 instance = self.instance
1582 logging.info("Shutting down instance %s on node %s",
1583 instance.name, instance.primary_node)
1584
1585 result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
1586 self.op.shutdown_timeout,
1587 self.op.reason)
1588 msg = result.fail_msg
1589 if msg:
1590 if self.op.ignore_failures:
1591 feedback_fn("Warning: can't shutdown instance: %s" % msg)
1592 else:
1593 raise errors.OpExecError("Could not shutdown instance %s on"
1594 " node %s: %s" %
1595 (instance.name, instance.primary_node, msg))
1596
1597 assert (self.owned_locks(locking.LEVEL_NODE) ==
1598 self.owned_locks(locking.LEVEL_NODE_RES))
1599 assert not (set(instance.all_nodes) -
1600 self.owned_locks(locking.LEVEL_NODE)), \
1601 "Not owning correct locks"
1602
1603 RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)
1604
1605
1606 class LUInstanceMove(LogicalUnit):
1607 """Move an instance by data-copying.
1608
1609 """
1610 HPATH = "instance-move"
1611 HTYPE = constants.HTYPE_INSTANCE
1612 REQ_BGL = False
1613
1614 def ExpandNames(self):
1615 self._ExpandAndLockInstance()
1616 target_node = ExpandNodeName(self.cfg, self.op.target_node)
1617 self.op.target_node = target_node
1618 self.needed_locks[locking.LEVEL_NODE] = [target_node]
1619 self.needed_locks[locking.LEVEL_NODE_RES] = []
1620 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
1621
1622 def DeclareLocks(self, level):
1623 if level == locking.LEVEL_NODE:
1624 self._LockInstancesNodes(primary_only=True)
1625 elif level == locking.LEVEL_NODE_RES:
1626 # Copy node locks
1627 self.needed_locks[locking.LEVEL_NODE_RES] = \
1628 CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1629
1630 def BuildHooksEnv(self):
1631 """Build hooks env.
1632
1633 This runs on master, primary and secondary nodes of the instance.
1634
1635 """
1636 env = {
1637 "TARGET_NODE": self.op.target_node,
1638 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
1639 }
1640 env.update(BuildInstanceHookEnvByObject(self, self.instance))
1641 return env
1642
1643 def BuildHooksNodes(self):
1644 """Build hooks nodes.
1645
1646 """
1647 nl = [
1648 self.cfg.GetMasterNode(),
1649 self.instance.primary_node,
1650 self.op.target_node,
1651 ]
1652 return (nl, nl)
1653
1654 def CheckPrereq(self):
1655 """Check prerequisites.
1656
1657 This checks that the instance is in the cluster.
1658
1659 """
1660 self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
1661 assert self.instance is not None, \
1662 "Cannot retrieve locked instance %s" % self.op.instance_name
1663
1664 if instance.disk_template not in constants.DTS_COPYABLE:
1665 raise errors.OpPrereqError("Disk template %s not suitable for copying" %
1666 instance.disk_template, errors.ECODE_STATE)
1667
1668 node = self.cfg.GetNodeInfo(self.op.target_node)
1669 assert node is not None, \
1670 "Cannot retrieve locked node %s" % self.op.target_node
1671
1672 self.target_node = target_node = node.name
1673
1674 if target_node == instance.primary_node:
1675 raise errors.OpPrereqError("Instance %s is already on the node %s" %
1676 (instance.name, target_node),
1677 errors.ECODE_STATE)
1678
1679 bep = self.cfg.GetClusterInfo().FillBE(instance)
1680
1681 for idx, dsk in enumerate(instance.disks):
1682 if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
1683 raise errors.OpPrereqError("Instance disk %d has a complex layout,"
1684 " cannot copy" % idx, errors.ECODE_STATE)
1685
1686 CheckNodeOnline(self, target_node)
1687 CheckNodeNotDrained(self, target_node)
1688 CheckNodeVmCapable(self, target_node)
1689 cluster = self.cfg.GetClusterInfo()
1690 group_info = self.cfg.GetNodeGroup(node.group)
1691 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1692 CheckTargetNodeIPolicy(self, ipolicy, instance, node, self.cfg,
1693 ignore=self.op.ignore_ipolicy)
1694
1695 if instance.admin_state == constants.ADMINST_UP:
1696 # check memory requirements on the target node
1697 CheckNodeFreeMemory(self, target_node,
1698 "moving instance %s" %
1699 instance.name, bep[constants.BE_MAXMEM],
1700 instance.hypervisor)
1701 else:
1702 self.LogInfo("Not checking memory on the secondary node as"
1703 " instance will not be started")
1704
1705 # check bridge existence
1706 CheckInstanceBridgesExist(self, instance, node=target_node)
1707
1708 def Exec(self, feedback_fn):
1709 """Move an instance.
1710
1711 The move is done by shutting it down on its present node, copying
1712 the data over (slow) and starting it on the new node.
1713
1714 """
1715 instance = self.instance
1716
1717 source_node = instance.primary_node
1718 target_node = self.target_node
1719
1720 self.LogInfo("Shutting down instance %s on source node %s",
1721 instance.name, source_node)
1722
1723 assert (self.owned_locks(locking.LEVEL_NODE) ==
1724 self.owned_locks(locking.LEVEL_NODE_RES))
1725
1726 result = self.rpc.call_instance_shutdown(source_node, instance,
1727 self.op.shutdown_timeout,
1728 self.op.reason)
1729 msg = result.fail_msg
1730 if msg:
1731 if self.op.ignore_consistency:
1732 self.LogWarning("Could not shutdown instance %s on node %s."
1733 " Proceeding anyway. Please make sure node"
1734 " %s is down. Error details: %s",
1735 instance.name, source_node, source_node, msg)
1736 else:
1737 raise errors.OpExecError("Could not shutdown instance %s on"
1738 " node %s: %s" %
1739 (instance.name, source_node, msg))
1740
1741 # create the target disks
1742 try:
1743 CreateDisks(self, instance, target_node=target_node)
1744 except errors.OpExecError:
1745 self.LogWarning("Device creation failed")
1746 self.cfg.ReleaseDRBDMinors(instance.name)
1747 raise
1748
1749 cluster_name = self.cfg.GetClusterInfo().cluster_name
1750
1751 errs = []
1752 # activate, get path, copy the data over
1753 for idx, disk in enumerate(instance.disks):
1754 self.LogInfo("Copying data for disk %d", idx)
1755 result = self.rpc.call_blockdev_assemble(target_node, (disk, instance),
1756 instance.name, True, idx)
1757 if result.fail_msg:
1758 self.LogWarning("Can't assemble newly created disk %d: %s",
1759 idx, result.fail_msg)
1760 errs.append(result.fail_msg)
1761 break
1762 dev_path = result.payload
1763 result = self.rpc.call_blockdev_export(source_node, (disk, instance),
1764 target_node, dev_path,
1765 cluster_name)
1766 if result.fail_msg:
1767 self.LogWarning("Can't copy data over for disk %d: %s",
1768 idx, result.fail_msg)
1769 errs.append(result.fail_msg)
1770 break
1771
1772 if errs:
1773 self.LogWarning("Some disks failed to copy, aborting")
1774 try:
1775 RemoveDisks(self, instance, target_node=target_node)
1776 finally:
1777 self.cfg.ReleaseDRBDMinors(instance.name)
1778 raise errors.OpExecError("Errors during disk copy: %s" %
1779 (",".join(errs),))
1780
1781 instance.primary_node = target_node
1782 self.cfg.Update(instance, feedback_fn)
1783
1784 self.LogInfo("Removing the disks on the original node")
1785 RemoveDisks(self, instance, target_node=source_node)
1786
1787 # Only start the instance if it's marked as up
1788 if instance.admin_state == constants.ADMINST_UP:
1789 self.LogInfo("Starting instance %s on node %s",
1790 instance.name, target_node)
1791
1792 disks_ok, _ = AssembleInstanceDisks(self, instance,
1793 ignore_secondaries=True)
1794 if not disks_ok:
1795 ShutdownInstanceDisks(self, instance)
1796 raise errors.OpExecError("Can't activate the instance's disks")
1797
1798 result = self.rpc.call_instance_start(target_node,
1799 (instance, None, None), False,
1800 self.op.reason)
1801 msg = result.fail_msg
1802 if msg:
1803 ShutdownInstanceDisks(self, instance)
1804 raise errors.OpExecError("Could not start instance %s on node %s: %s" %
1805 (instance.name, target_node, msg))
1806
1807
1808 class LUInstanceMultiAlloc(NoHooksLU):
1809 """Allocates multiple instances at the same time.
1810
1811 """
1812 REQ_BGL = False
1813
1814 def CheckArguments(self):
1815 """Check arguments.
1816
1817 """
1818 nodes = []
1819 for inst in self.op.instances:
1820 if inst.iallocator is not None:
1821 raise errors.OpPrereqError("iallocator are not allowed to be set on"
1822 " instance objects", errors.ECODE_INVAL)
1823 nodes.append(bool(inst.pnode))
1824 if inst.disk_template in constants.DTS_INT_MIRROR:
1825 nodes.append(bool(inst.snode))
1826
1827 has_nodes = compat.any(nodes)
1828 if compat.all(nodes) ^ has_nodes:
1829 raise errors.OpPrereqError("There are instance objects providing"
1830 " pnode/snode while others do not",
1831 errors.ECODE_INVAL)
1832
1833 if self.op.iallocator is None:
1834 default_iallocator = self.cfg.GetDefaultIAllocator()
1835 if default_iallocator and has_nodes:
1836 self.op.iallocator = default_iallocator
1837 else:
1838 raise errors.OpPrereqError("No iallocator or nodes on the instances"
1839 " given and no cluster-wide default"
1840 " iallocator found; please specify either"
1841 " an iallocator or nodes on the instances"
1842 " or set a cluster-wide default iallocator",
1843 errors.ECODE_INVAL)
1844
1845 _CheckOpportunisticLocking(self.op)
1846
1847 dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
1848 if dups:
1849 raise errors.OpPrereqError("There are duplicate instance names: %s" %
1850 utils.CommaJoin(dups), errors.ECODE_INVAL)
1851
1852 def ExpandNames(self):
1853 """Calculate the locks.
1854
1855 """
1856 self.share_locks = ShareAll()
1857 self.needed_locks = {
1858 # The iallocator will select nodes; even if no iallocator is used,
1859 # collisions with LUInstanceCreate should be avoided
1860 locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
1861 }
1862
1863 if self.op.iallocator:
1864 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
1865 self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
1866
1867 if self.op.opportunistic_locking:
1868 self.opportunistic_locks[locking.LEVEL_NODE] = True
1869 self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
1870 else:
1871 nodeslist = []
1872 for inst in self.op.instances:
1873 inst.pnode = ExpandNodeName(self.cfg, inst.pnode)
1874 nodeslist.append(inst.pnode)
1875 if inst.snode is not None:
1876 inst.snode = ExpandNodeName(self.cfg, inst.snode)
1877 nodeslist.append(inst.snode)
1878
1879 self.needed_locks[locking.LEVEL_NODE] = nodeslist
1880 # Lock resources of instance's primary and secondary nodes (copy to
1881 # prevent accidental modification)
1882 self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)
1883
1884 def CheckPrereq(self):
1885 """Check prerequisite.
1886
1887 """
1888 cluster = self.cfg.GetClusterInfo()
1889 default_vg = self.cfg.GetVGName()
1890 ec_id = self.proc.GetECId()
1891
1892 if self.op.opportunistic_locking:
1893 # Only consider nodes for which a lock is held
1894 node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
1895 else:
1896 node_whitelist = None
1897
1898 insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
1899 _ComputeNics(op, cluster, None,
1900 self.cfg, ec_id),
1901 _ComputeFullBeParams(op, cluster),
1902 node_whitelist)
1903 for op in self.op.instances]
1904
1905 req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
1906 ial = iallocator.IAllocator(self.cfg, self.rpc, req)
1907
1908 ial.Run(self.op.iallocator)
1909
1910 if not ial.success:
1911 raise errors.OpPrereqError("Can't compute nodes using"
1912 " iallocator '%s': %s" %
1913 (self.op.iallocator, ial.info),
1914 errors.ECODE_NORES)
1915
1916 self.ia_result = ial.result
1917
1918 if self.op.dry_run:
1919 self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
1920 constants.JOB_IDS_KEY: [],
1921 })
1922
1923 def _ConstructPartialResult(self):
1924 """Contructs the partial result.
1925
1926 """
1927 (allocatable, failed) = self.ia_result
1928 return {
1929 opcodes.OpInstanceMultiAlloc.ALLOCATABLE_KEY:
1930 map(compat.fst, allocatable),
1931 opcodes.OpInstanceMultiAlloc.FAILED_KEY: failed,
1932 }
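# Illustrative note (not part of the original code): with a hypothetical
# iallocator answer of ([("inst1", ["node1"])], ["inst2"]), the partial
# result built above would be:
#
#   {
#     opcodes.OpInstanceMultiAlloc.ALLOCATABLE_KEY: ["inst1"],
#     opcodes.OpInstanceMultiAlloc.FAILED_KEY: ["inst2"],
#   }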
1933
1934 def Exec(self, feedback_fn):
1935 """Executes the opcode.
1936
1937 """
1938 op2inst = dict((op.instance_name, op) for op in self.op.instances)
1939 (allocatable, failed) = self.ia_result
1940
1941 jobs = []
1942 for (name, nodes) in allocatable:
1943 op = op2inst.pop(name)
1944
1945 if len(nodes) > 1:
1946 (op.pnode, op.snode) = nodes
1947 else:
1948 (op.pnode,) = nodes
1949
1950 jobs.append([op])
1951
1952 missing = set(op2inst.keys()) - set(failed)
1953 assert not missing, \
1954 "Iallocator did return incomplete result: %s" % utils.CommaJoin(missing)
1955
1956 return ResultWithJobs(jobs, **self._ConstructPartialResult())
1957
1958
1959 class _InstNicModPrivate:
1960 """Data structure for network interface modifications.
1961
1962 Used by L{LUInstanceSetParams}.
1963
1964 """
1965 def __init__(self):
1966 self.params = None
1967 self.filled = None
1968
1969
1970 def _PrepareContainerMods(mods, private_fn):
1971 """Prepares a list of container modifications by adding a private data field.
1972
1973 @type mods: list of tuples; (operation, index, parameters)
1974 @param mods: List of modifications
1975 @type private_fn: callable or None
1976 @param private_fn: Callable for constructing a private data field for a
1977 modification
1978 @rtype: list
1979
1980 """
1981 if private_fn is None:
1982 fn = lambda: None
1983 else:
1984 fn = private_fn
1985
1986 return [(op, idx, params, fn()) for (op, idx, params) in mods]
1987
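# Illustrative sketch (not part of the original code): preparing a NIC
# modification list; the parameters shown are hypothetical.
#
#   mods = _PrepareContainerMods(
#       [(constants.DDM_MODIFY, 0, {constants.INIC_IP: "198.51.100.10"})],
#       _InstNicModPrivate)
#   # -> [(constants.DDM_MODIFY, 0, {...}, <_InstNicModPrivate object>)]
#   # The private object in the fourth position is later filled in by
#   # _PrepareNicModification and consumed by the _ApplyContainerMods
#   # callbacks.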
1988
1989 def _CheckNodesPhysicalCPUs(lu, nodenames, requested, hypervisor_name):
1990 """Checks if nodes have enough physical CPUs
1991
1992 This function checks if all given nodes have the needed number of
1993 physical CPUs. In case any node has less CPUs or we cannot get the
1994 information from the node, this function raises an OpPrereqError
1995 exception.
1996
1997 @type lu: C{LogicalUnit}
1998 @param lu: a logical unit from which we get configuration data
1999 @type nodenames: C{list}
2000 @param nodenames: the list of node names to check
2001 @type requested: C{int}
2002 @param requested: the minimum acceptable number of physical CPUs
2003 @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
2004 or we cannot check the node
2005
2006 """
2007 nodeinfo = lu.rpc.call_node_info(nodenames, None, [hypervisor_name], None)
2008 for node in nodenames:
2009 info = nodeinfo[node]
2010 info.Raise("Cannot get current information from node %s" % node,
2011 prereq=True, ecode=errors.ECODE_ENVIRON)
2012 (_, _, (hv_info, )) = info.payload
2013 num_cpus = hv_info.get("cpu_total", None)
2014 if not isinstance(num_cpus, int):
2015 raise errors.OpPrereqError("Can't compute the number of physical CPUs"
2016 " on node %s, result was '%s'" %
2017 (node, num_cpus), errors.ECODE_ENVIRON)
2018 if requested > num_cpus:
2019 raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
2020 "required" % (node, num_cpus, requested),
2021 errors.ECODE_NORES)
2022
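# Illustrative sketch (not part of the original code): a caller requiring at
# least four physical CPUs on every instance node; node names and hypervisor
# are hypothetical.
#
#   _CheckNodesPhysicalCPUs(self, ["node1.example.com", "node2.example.com"],
#                           4, constants.HT_XEN_PVM)
#   # raises OpPrereqError if any node reports fewer than 4 physical CPUs or
#   # the node information cannot be retrieved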
2023
2024 def GetItemFromContainer(identifier, kind, container):
2025 """Return the item refered by the identifier.
2026
2027 @type identifier: string
2028 @param identifier: Item index or name or UUID
2029 @type kind: string
2030 @param kind: One-word item description
2031 @type container: list
2032 @param container: Container to get the item from
2033
2034 """
2035 # Index
2036 try:
2037 idx = int(identifier)
2038 if idx == -1:
2039 # Append
2040 absidx = len(container) - 1
2041 elif idx < 0:
2042 raise IndexError("Not accepting negative indices other than -1")
2043 elif idx > len(container):
2044 raise IndexError("Got %s index %s, but there are only %s" %
2045 (kind, idx, len(container)))
2046 else:
2047 absidx = idx
2048 return (absidx, container[idx])
2049 except ValueError:
2050 pass
2051
2052 for idx, item in enumerate(container):
2053 if item.uuid == identifier or item.name == identifier:
2054 return (idx, item)
2055
2056 raise errors.OpPrereqError("Cannot find %s with identifier %s" %
2057 (kind, identifier), errors.ECODE_NOENT)
2058
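# Illustrative examples (not part of the original code) of identifier
# resolution; the name "data-disk" is hypothetical:
#
#   GetItemFromContainer("0", "disk", instance.disks)           # by index
#   GetItemFromContainer("-1", "disk", instance.disks)          # last item
#   GetItemFromContainer("data-disk", "disk", instance.disks)   # by name/UUID
#
# Each call returns an (absolute_index, item) tuple, or raises OpPrereqError
# if nothing matches.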
2059
2060 def _ApplyContainerMods(kind, container, chgdesc, mods,
2061 create_fn, modify_fn, remove_fn):
2062 """Applies descriptions in C{mods} to C{container}.
2063
2064 @type kind: string
2065 @param kind: One-word item description
2066 @type container: list
2067 @param container: Container to modify
2068 @type chgdesc: None or list
2069 @param chgdesc: List of applied changes
2070 @type mods: list
2071 @param mods: Modifications as returned by L{_PrepareContainerMods}
2072 @type create_fn: callable
2073 @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
2074 receives absolute item index, parameters and private data object as added
2075 by L{_PrepareContainerMods}, returns tuple containing new item and changes
2076 as list
2077 @type modify_fn: callable
2078 @param modify_fn: Callback for modifying an existing item
2079 (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
2080 and private data object as added by L{_PrepareContainerMods}, returns
2081 changes as list
2082 @type remove_fn: callable
2083 @param remove_fn: Callback on removing item; receives absolute item index,
2084 item and private data object as added by L{_PrepareContainerMods}
2085
2086 """
2087 for (op, identifier, params, private) in mods:
2088 changes = None
2089
2090 if op == constants.DDM_ADD:
2091 # Calculate where item will be added
2092 # When adding an item, identifier can only be an index
2093 try:
2094 idx = int(identifier)
2095 except ValueError:
2096 raise errors.OpPrereqError("Only possitive integer or -1 is accepted as"
2097 " identifier for %s" % constants.DDM_ADD,
2098 errors.ECODE_INVAL)
2099 if idx == -1:
2100 addidx = len(container)
2101 else:
2102 if idx < 0:
2103 raise IndexError("Not accepting negative indices other than -1")
2104 elif idx > len(container):
2105 raise IndexError("Got %s index %s, but there are only %s" %
2106 (kind, idx, len(container)))
2107 addidx = idx
2108
2109 if create_fn is None:
2110 item = params
2111 else:
2112 (item, changes) = create_fn(addidx, params, private)
2113
2114 if idx == -1:
2115 container.append(item)
2116 else:
2117 assert idx >= 0
2118 assert idx <= len(container)
2119 # list.insert inserts before the specified index
2120 container.insert(idx, item)
2121 else:
2122 # Retrieve existing item
2123 (absidx, item) = GetItemFromContainer(identifier, kind, container)
2124
2125 if op == constants.DDM_REMOVE:
2126 assert not params
2127
2128 if remove_fn is not None:
2129 remove_fn(absidx, item, private)
2130
2131 changes = [("%s/%s" % (kind, absidx), "remove")]
2132
2133 assert container[absidx] == item
2134 del container[absidx]
2135 elif op == constants.DDM_MODIFY:
2136 if modify_fn is not None:
2137 changes = modify_fn(absidx, item, params, private)
2138 else:
2139 raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2140
2141 assert _TApplyContModsCbChanges(changes)
2142
2143 if not (chgdesc is None or changes is None):
2144 chgdesc.extend(changes)
2145
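# Illustrative sketch (not part of the original code): removing the last NIC
# of an instance through the container-modification machinery.  No callbacks
# are passed (None), which is sufficient for a plain removal.
#
#   chgdesc = []
#   mods = _PrepareContainerMods([(constants.DDM_REMOVE, -1, {})], None)
#   _ApplyContainerMods("NIC", instance.nics, chgdesc, mods, None, None, None)
#   # chgdesc now holds [("NIC/<last index>", "remove")] and the NIC has been
#   # deleted from the container.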
2146
2147 def _UpdateIvNames(base_index, disks):
2148 """Updates the C{iv_name} attribute of disks.
2149
2150 @type disks: list of L{objects.Disk}
2151
2152 """
2153 for (idx, disk) in enumerate(disks):
2154 disk.iv_name = "disk/%s" % (base_index + idx, )
2155
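# Illustrative example (not part of the original code): after removing the
# disk at index 1, the disks that followed it can be renumbered from the
# removal point:
#
#   _UpdateIvNames(1, instance.disks[1:])
#   # the disk formerly named "disk/2" is now "disk/1", and so on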
2156
2157 class LUInstanceSetParams(LogicalUnit):
2158 """Modifies an instances's parameters.
2159
2160 """
2161 HPATH = "instance-modify"
2162 HTYPE = constants.HTYPE_INSTANCE
2163 REQ_BGL = False
2164
2165 @staticmethod
2166 def _UpgradeDiskNicMods(kind, mods, verify_fn):
2167 assert ht.TList(mods)
2168 assert not mods or len(mods[0]) in (2, 3)
2169
2170 if mods and len(mods[0]) == 2:
2171 result = []
2172
2173 addremove = 0
2174 for op, params in mods:
2175 if op in (constants.DDM_ADD, constants.DDM_REMOVE):
2176 result.append((op, -1, params))
2177 addremove += 1
2178
2179 if addremove > 1:
2180 raise errors.OpPrereqError("Only one %s add or remove operation is"
2181 " supported at a time" % kind,
2182 errors.ECODE_INVAL)
2183 else:
2184 result.append((constants.DDM_MODIFY, op, params))
2185
2186 assert verify_fn(result)
2187 else:
2188 result = mods
2189
2190 return result
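# Illustrative note (not part of the original code): legacy two-element
# modifications are upgraded to the three-element form used internally;
# values are hypothetical.
#
#   [(constants.DDM_ADD, {"size": 1024})]  -> [(DDM_ADD, -1, {"size": 1024})]
#   [("2", {"mode": "ro"})]                -> [(DDM_MODIFY, "2", {"mode": "ro"})]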
2191
2192 @staticmethod
2193 def _CheckMods(kind, mods, key_types, item_fn):
2194 """Ensures requested disk/NIC modifications are valid.
2195
2196 """
2197 for (op, _, params) in mods:
2198 assert ht.TDict(params)
2199
2200 # If 'key_types' is an empty dict, we assume we have an
2201 # 'ext' template and thus do not ForceDictType
2202 if key_types:
2203 utils.ForceDictType(params, key_types)
2204
2205 if op == constants.DDM_REMOVE:
2206 if params:
2207 raise errors.OpPrereqError("No settings should be passed when"
2208 " removing a %s" % kind,
2209 errors.ECODE_INVAL)
2210 elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
2211 item_fn(op, params)
2212 else:
2213 raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2214
2215 @staticmethod
2216 def _VerifyDiskModification(op, params):
2217 """Verifies a disk modification.
2218
2219 """
2220 if op == constants.DDM_ADD:
2221 mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
2222 if mode not in constants.DISK_ACCESS_SET:
2223 raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
2224 errors.ECODE_INVAL)
2225
2226 size = params.get(constants.IDISK_SIZE, None)
2227 if size is None:
2228 raise errors.OpPrereqError("Required disk parameter '%s' missing" %
2229 constants.IDISK_SIZE, errors.ECODE_INVAL)
2230
2231 try:
2232 size = int(size)
2233 except (TypeError, ValueError), err:
2234 raise errors.OpPrereqError("Invalid disk size parameter: %s" % err,
2235 errors.ECODE_INVAL)
2236
2237 params[constants.IDISK_SIZE] = size
2238 name = params.get(constants.IDISK_NAME, None)
2239 if name is not None and name.lower() == constants.VALUE_NONE:
2240 params[constants.IDISK_NAME] = None
2241
2242 elif op == constants.DDM_MODIFY:
2243 if constants.IDISK_SIZE in params:
2244 raise errors.OpPrereqError("Disk size change not possible, use"
2245 " grow-disk", errors.ECODE_INVAL)
2246 if len(params) > 2:
2247 raise errors.OpPrereqError("Disk modification doesn't support"
2248 " additional arbitrary parameters",
2249 errors.ECODE_INVAL)
2250 name = params.get(constants.IDISK_NAME, None)
2251 if name is not None and name.lower() == constants.VALUE_NONE:
2252 params[constants.IDISK_NAME] = None
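# Illustrative examples (not part of the original code) of how the method
# above validates disk modifications; values are hypothetical.
#
#   _VerifyDiskModification(constants.DDM_ADD, {constants.IDISK_SIZE: 1024})
#   # accepted; the access mode defaults to constants.DISK_RDWR
#   _VerifyDiskModification(constants.DDM_ADD, {})
#   # raises OpPrereqError: the size parameter is required
#   _VerifyDiskModification(constants.DDM_MODIFY, {constants.IDISK_SIZE: 2048})
#   # raises OpPrereqError: size changes must go through grow-disk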
2253
2254 @staticmethod
2255 def _VerifyNicModification(op, params):
2256 """Verifies a network interface modification.
2257
2258 """
2259 if op in (constants.DDM_ADD, constants.DDM_MODIFY):
2260 ip = params.get(constants.INIC_IP, None)
2261 name = params.get(constants.INIC_NAME, None)
2262 req_net = params.get(constants.INIC_NETWORK, None)
2263 link = params.get(constants.NIC_LINK, None)
2264 mode = params.get(constants.NIC_MODE, None)
2265 if name is not None and name.lower() == constants.VALUE_NONE:
2266 params[constants.INIC_NAME] = None
2267 if req_net is not None:
2268 if req_net.lower() == constants.VALUE_NONE:
2269 params[constants.INIC_NETWORK] = None
2270 req_net = None
2271 elif link is not None or mode is not None:
2272 raise errors.OpPrereqError("If network is given"
2273 " mode or link should not",
2274 errors.ECODE_INVAL)
2275
2276 if op == constants.DDM_ADD:
2277 macaddr = params.get(constants.INIC_MAC, None)
2278 if macaddr is None:
2279 params[constants.INIC_MAC] = constants.VALUE_AUTO
2280
2281 if ip is not None:
2282 if ip.lower() == constants.VALUE_NONE:
2283 params[constants.INIC_IP] = None
2284 else:
2285 if ip.lower() == constants.NIC_IP_POOL:
2286 if op == constants.DDM_ADD and req_net is None:
2287 raise errors.OpPrereqError("If ip=pool, parameter network"
2288 " cannot be none",
2289 errors.ECODE_INVAL)
2290 else:
2291 if not netutils.IPAddress.IsValid(ip):
2292 raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
2293 errors.ECODE_INVAL)
2294
2295 if constants.INIC_MAC in params:
2296 macaddr = params[constants.INIC_MAC]
2297 if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2298 macaddr = utils.NormalizeAndValidateMac(macaddr)
2299
2300 if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
2301 raise errors.OpPrereqError("'auto' is not a valid MAC address when"
2302 " modifying an existing NIC",
2303 errors.ECODE_INVAL)
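# Illustrative examples (not part of the original code) of NIC modification
# checks performed by the method above; values are hypothetical.
#
#   _VerifyNicModification(constants.DDM_ADD, {constants.INIC_IP: "pool"})
#   # raises OpPrereqError: ip=pool requires a network to be given
#   _VerifyNicModification(constants.DDM_ADD, {})
#   # accepted; the MAC address defaults to constants.VALUE_AUTO
#   _VerifyNicModification(constants.DDM_MODIFY,
#                          {constants.INIC_MAC: constants.VALUE_AUTO})
#   # raises OpPrereqError: "auto" is not valid when modifying an existing NIC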
2304
2305 def CheckArguments(self):
2306 if not (self.op.nics or self.op.disks or self.op.disk_template or
2307 self.op.hvparams or self.op.beparams or self.op.os_name or
2308 self.op.offline is not None or self.op.runtime_mem or
2309 self.op.pnode):
2310 raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
2311
2312 if self.op.hvparams:
2313 CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
2314 "hypervisor", "instance", "cluster")
2315
2316 self.op.disks = self._UpgradeDiskNicMods(
2317 "disk", self.op.disks, opcodes.OpInstanceSetParams.TestDiskModifications)
2318 self.op.nics = self._UpgradeDiskNicMods(
2319 "NIC", self.op.nics, opcodes.OpInstanceSetParams.TestNicModifications)
2320
2321 if self.op.disks and self.op.disk_template is not None:
2322 raise errors.OpPrereqError("Disk template conversion and other disk"
2323 " changes not supported at the same time",
2324 errors.ECODE_INVAL)
2325
2326 if (self.op.disk_template and
2327 self.op.disk_template in constants.DTS_INT_MIRROR and
2328 self.op.remote_node is None):
2329 raise errors.OpPrereqError("Changing the disk template to a mirrored"
2330 " one requires specifying a secondary node",
2331 errors.ECODE_INVAL)
2332
2333 # Check NIC modifications
2334 self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
2335 self._VerifyNicModification)
2336
2337 if self.op.pnode:
2338 self.op.pnode = ExpandNodeName(self.cfg, self.op.pnode)
2339
2340 def ExpandNames(self):
2341 self._ExpandAndLockInstance()
2342 self.needed_locks[locking.LEVEL_NODEGROUP] = []
2343 # Can't even acquire node locks in shared mode as upcoming changes in
2344 # Ganeti 2.6 will start to modify the node object on disk conversion
2345 self.needed_locks[locking.LEVEL_NODE] = []
2346 self.needed_locks[locking.LEVEL_NODE_RES] = []
2347 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2348 # Lock the node group in order to look up the ipolicy
2349 self.share_locks[locking.LEVEL_NODEGROUP] = 1
2350
2351 def DeclareLocks(self, level):
2352 if level == locking.LEVEL_NODEGROUP:
2353 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
2354 # Acquire locks for the instance's nodegroups optimistically. Needs
2355 # to be verified in CheckPrereq
2356 self.needed_locks[locking.LEVEL_NODEGROUP] = \
2357 self.cfg.GetInstanceNodeGroups(self.op.instance_name)
2358 elif level == locking.LEVEL_NODE:
2359 self._LockInstancesNodes()
2360 if self.op.disk_template and self.op.remote_node:
2361 self.op.remote_node = ExpandNodeName(self.cfg, self.op.remote_node)
2362 self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node)
2363 elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
2364 # Copy node locks
2365 self.needed_locks[locking.LEVEL_NODE_RES] = \
2366 CopyLockList(self.needed_locks[locking.LEVEL_NODE])
2367
2368 def BuildHooksEnv(self):
2369 """Build hooks env.
2370
2371 This runs on the master, primary and secondaries.
2372
2373 """
2374 args = {}
2375 if constants.BE_MINMEM in self.be_new:
2376 args["minmem"] = self.be_new[constants.BE_MINMEM]
2377 if constants.BE_MAXMEM in self.be_new:
2378 args["maxmem"] = self.be_new[constants.BE_MAXMEM]
2379 if constants.BE_VCPUS in self.be_new:
2380 args["vcpus"] = self.be_new[constants.BE_VCPUS]
2381 # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
2382 # information at all.
2383
2384 if self._new_nics is not None:
2385 nics = []
2386
2387 for nic in self._new_nics:
2388 n = copy.deepcopy(nic)
2389 nicparams = self.cluster.SimpleFillNIC(n.nicparams)
2390 n.nicparams = nicparams
2391 nics.append(NICToTuple(self, n))
2392
2393 args["nics"] = nics
2394
2395 env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
2396 if self.op.disk_template:
2397 env["NEW_DISK_TEMPLATE"] = self.op.disk_template
2398 if self.op.runtime_mem:
2399 env["RUNTIME_MEMORY"] = self.op.runtime_mem
2400
2401 return env
2402
2403 def BuildHooksNodes(self):
2404 """Build hooks nodes.
2405
2406 """
2407 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2408 return (nl, nl)
2409
2410 def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
2411 old_params, cluster, pnode):
2412
2413 update_params_dict = dict([(key, params[key])
2414 for key in constants.NICS_PARAMETERS
2415 if key in params])
2416
2417 req_link = update_params_dict.get(constants.NIC_LINK, None)
2418 req_mode = update_params_dict.get(constants.NIC_MODE, None)
2419
2420 new_net_uuid = None
2421 new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
2422 if new_net_uuid_or_name:
2423 new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
2424 new_net_obj = self.cfg.GetNetwork(new_net_uuid)
2425
2426 if old_net_uuid:
2427 old_net_obj = self.cfg.GetNetwork(old_net_uuid)
2428
2429 if new_net_uuid:
2430 netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode)
2431 if not netparams:
2432 raise errors.OpPrereqError("No netparams found for the network"
2433 " %s, probably not connected" %
2434 new_net_obj.name, errors.ECODE_INVAL)
2435 new_params = dict(netparams)
2436 else:
2437 new_params = GetUpdatedParams(old_params, update_params_dict)
2438
2439 utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)
2440
2441 new_filled_params = cluster.SimpleFillNIC(new_params)
2442 objects.NIC.CheckParameterSyntax(new_filled_params)
2443
2444 new_mode = new_filled_params[constants.NIC_MODE]
2445 if new_mode == constants.NIC_MODE_BRIDGED:
2446 bridge = new_filled_params[constants.NIC_LINK]
2447 msg = self.rpc.call_bridges_exist(pnode, [bridge]).fail_msg
2448 if msg:
2449 msg = "Error checking bridges on node '%s': %s" % (pnode, msg)
2450 if self.op.force:
2451 self.warn.append(msg)
2452 else:
2453 raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
2454
2455 elif new_mode == constants.NIC_MODE_ROUTED:
2456 ip = params.get(constants.INIC_IP, old_ip)
2457 if ip is None:
2458 raise errors.OpPrereqError("Cannot set the NIC IP address to None"
2459 " on a routed NIC", errors.ECODE_INVAL)
2460
2461 elif new_mode == constants.NIC_MODE_OVS:
2462 # TODO: check OVS link
2463 self.LogInfo("OVS links are currently not checked for correctness")
2464
2465 if constants.INIC_MAC in params:
2466 mac = params[constants.INIC_MAC]
2467 if mac is None:
2468 raise errors.OpPrereqError("Cannot unset the NIC MAC address",
2469 errors.ECODE_INVAL)
2470 elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2471 # otherwise generate the MAC address
2472 params[constants.INIC_MAC] = \
2473 self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2474 else:
2475 # or validate/reserve the current one
2476 try:
2477 self.cfg.ReserveMAC(mac, self.proc.GetECId())
2478 except errors.ReservationError:
2479 raise errors.OpPrereqError("MAC address '%s' already in use"
2480 " in cluster" % mac,
2481 errors.ECODE_NOTUNIQUE)
2482 elif new_net_uuid != old_net_uuid:
2483
2484 def get_net_prefix(net_uuid):
2485 mac_prefix = None
2486 if net_uuid:
2487 nobj = self.cfg.GetNetwork(net_uuid)
2488 mac_prefix = nobj.mac_prefix
2489
2490 return mac_prefix
2491
2492 new_prefix = get_net_prefix(new_net_uuid)
2493 old_prefix = get_net_prefix(old_net_uuid)
2494 if old_prefix != new_prefix:
2495 params[constants.INIC_MAC] = \
2496 self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2497
2498 # if there is a change in (ip, network) tuple
2499 new_ip = params.get(constants.INIC_IP, old_ip)
2500 if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
2501 if new_ip:
2502 # if IP is pool then require a network and generate one IP
2503 if new_ip.lower() == constants.NIC_IP_POOL:
2504 if new_net_uuid:
2505 try:
2506 new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
2507 except errors.ReservationError:
2508 raise errors.OpPrereqError("Unable to get a free IP"
2509 " from the address pool",
2510 errors.ECODE_STATE)
2511 self.LogInfo("Chose IP %s from network %s",
2512 new_ip,
2513 new_net_obj.name)
2514 params[constants.INIC_IP] = new_ip
2515 else:
2516 raise errors.OpPrereqError("ip=pool, but no network found",
2517 errors.ECODE_INVAL)
2518 # Reserve the new IP in the new network, if any
2519 elif new_net_uuid:
2520 try:
2521 self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId())
2522 self.LogInfo("Reserving IP %s in network %s",
2523 new_ip, new_net_obj.name)
2524 except errors.ReservationError:
2525 raise errors.OpPrereqError("IP %s not available in network %s" %
2526 (new_ip, new_net_obj.name),
2527 errors.ECODE_NOTUNIQUE)
2528 # new network is None so check if new IP is a conflicting IP
2529 elif self.op.conflicts_check:
2530 _CheckForConflictingIp(self, new_ip, pnode)
2531
2532 # release old IP if old network is not None
2533 if old_ip and old_net_uuid:
2534 try:
2535 self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
2536 except errors.AddressPoolError:
2537 logging.warning("Release IP %s not contained in network %s",
2538 old_ip, old_net_obj.name)
2539
2540 # there are no changes in (ip, network) tuple and old network is not None
2541 elif (old_net_uuid is not None and
2542 (req_link is not None or req_mode is not None)):
2543 raise errors.OpPrereqError("Not allowed to change link or mode of"
2544 " a NIC that is connected to a network",
2545 errors.ECODE_INVAL)
2546
2547 private.params = new_params
2548 private.filled = new_filled_params
2549
2550 def _PreCheckDiskTemplate(self, pnode_info):
2551 """CheckPrereq checks related to a new disk template."""
2552 # Arguments are passed to avoid configuration lookups
2553 instance = self.instance
2554 pnode = instance.primary_node
2555 cluster = self.cluster
2556 if instance.disk_template == self.op.disk_template:
2557 raise errors.OpPrereqError("Instance already has disk template %s" %
2558 instance.disk_template, errors.ECODE_INVAL)
2559
2560 if (instance.disk_template,
2561 self.op.disk_template) not in self._DISK_CONVERSIONS:
2562 raise errors.OpPrereqError("Unsupported disk template conversion from"
2563 " %s to %s" % (instance.disk_template,
2564 self.op.disk_template),
2565 errors.ECODE_INVAL)
2566 CheckInstanceState(self, instance, INSTANCE_DOWN,
2567 msg="cannot change disk template")
2568 if self.op.disk_template in constants.DTS_INT_MIRROR:
2569 if self.op.remote_node == pnode:
2570 raise errors.OpPrereqError("Given new secondary node %s is the same"
2571 " as the primary node of the instance" %
2572 self.op.remote_node, errors.ECODE_STATE)
2573 CheckNodeOnline(self, self.op.remote_node)
2574 CheckNodeNotDrained(self, self.op.remote_node)
2575 # FIXME: here we assume that the old instance type is DT_PLAIN
2576 assert instance.disk_template == constants.DT_PLAIN
2577 disks = [{constants.IDISK_SIZE: d.size,
2578 constants.IDISK_VG: d.logical_id[0]}
2579 for d in instance.disks]
2580 required = ComputeDiskSizePerVG(self.op.disk_template, disks)
2581 CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)
2582
2583 snode_info = self.cfg.GetNodeInfo(self.op.remote_node)
2584 snode_group = self.cfg.GetNodeGroup(snode_info.group)
2585 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
2586 snode_group)
2587 CheckTargetNodeIPolicy(self, ipolicy, instance, snode_info, self.cfg,
2588 ignore=self.op.ignore_ipolicy)
2589 if pnode_info.group != snode_info.group:
2590 self.LogWarning("The primary and secondary nodes are in two"
2591 " different node groups; the disk parameters"
2592 " from the first disk's node group will be"
2593 " used")
2594
2595 if self.op.disk_template not in constants.DTS_EXCL_STORAGE:
2596 # Make sure none of the nodes require exclusive storage
2597 nodes = [pnode_info]
2598 if self.op.disk_template in constants.DTS_INT_MIRROR:
2599 assert snode_info
2600 nodes.append(snode_info)
2601 has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
2602 if compat.any(map(has_es, nodes)):
2603 errmsg = ("Cannot convert disk template from %s to %s when exclusive"
2604 " storage is enabled" % (instance.disk_template,
2605 self.op.disk_template))
2606 raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)
2607
2608 def CheckPrereq(self):
2609 """Check prerequisites.
2610
2611 This checks the requested parameter changes against the instance's
2611 current configuration and state.
2612
2613 """
2614 assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
2615 instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2616
2617 cluster = self.cluster = self.cfg.GetClusterInfo()
2618 assert self.instance is not None, \
2619 "Cannot retrieve locked instance %s" % self.op.instance_name
2620
2621 pnode = instance.primary_node
2622
2623 self.warn = []
2624
2625 if (self.op.pnode is not None and self.op.pnode != pnode and
2626 not self.op.force):
2627 # verify that the instance is not up
2628 instance_info = self.rpc.call_instance_info(pnode, instance.name,
2629 instance.hypervisor)
2630 if instance_info.fail_msg:
2631 self.warn.append("Can't get instance runtime information: %s" %
2632 instance_info.fail_msg)
2633 elif instance_info.payload:
2634 raise errors.OpPrereqError("Instance is still running on %s" % pnode,
2635 errors.ECODE_STATE)
2636
2637 assert pnode in self.owned_locks(locking.LEVEL_NODE)
2638 nodelist = list(instance.all_nodes)
2639 pnode_info = self.cfg.GetNodeInfo(pnode)
2640 self.diskparams = self.cfg.GetInstanceDiskParams(instance)
2641
2642 #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
2643 assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
2644 group_info = self.cfg.GetNodeGroup(pnode_info.group)
2645
2646 # dictionary with instance information after the modification
2647 ispec = {}
2648
2649 # Check disk modifications. This is done here and not in CheckArguments
2650 # (as with NICs), because we need to know the instance's disk template
2651 if instance.disk_template == constants.DT_EXT:
2652 self._CheckMods("disk", self.op.disks, {},
2653 self._VerifyDiskModification)
2654 else:
2655 self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
2656 self._VerifyDiskModification)
2657
2658 # Prepare disk/NIC modifications
2659 self.diskmod = _PrepareContainerMods(self.op.disks, None)
2660 self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)
2661
2662 # Check the validity of the `provider' parameter
2663 if instance.disk_template == constants.DT_EXT:
2664 for mod in self.diskmod:
2665 ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
2666 if mod[0] == constants.DDM_ADD:
2667 if ext_provider is None:
2668 raise errors.OpPrereqError("Instance template is '%s' and parameter"
2669 " '%s' missing, during disk add" %
2670 (constants.DT_EXT,
2671 constants.IDISK_PROVIDER),
2672 errors.ECODE_NOENT)
2673 elif mod[0] == constants.DDM_MODIFY:
2674 if ext_provider:
2675 raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
2676 " modification" %
2677 constants.IDISK_PROVIDER,
2678 errors.ECODE_INVAL)
2679 else:
2680 for mod in self.diskmod:
2681 ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
2682 if ext_provider is not None:
2683 raise errors.OpPrereqError("Parameter '%s' is only valid for"
2684 " instances of type '%s'" %
2685 (constants.IDISK_PROVIDER,
2686 constants.DT_EXT),
2687 errors.ECODE_INVAL)
2688
2689 # OS change
2690 if self.op.os_name and not self.op.force:
2691 CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
2692 self.op.force_variant)
2693 instance_os = self.op.os_name
2694 else:
2695 instance_os = instance.os
2696
2697 assert not (self.op.disk_template and self.op.disks), \
2698 "Can't modify disk template and apply disk changes at the same time"
2699
2700 if self.op.disk_template:
2701 self._PreCheckDiskTemplate(pnode_info)
2702
2703 # hvparams processing
2704 if self.op.hvparams:
2705 hv_type = instance.hypervisor
2706 i_hvdict = GetUpdatedParams(instance.hvparams, self.op.hvparams)
2707 utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
2708 hv_new = cluster.SimpleFillHV(hv_type, instance.os, i_hvdict)
2709
2710 # local check
2711 hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
2712 CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
2713 self.hv_proposed = self.hv_new = hv_new # the new actual values
2714 self.hv_inst = i_hvdict # the new dict (without defaults)
2715 else:
2716 self.hv_proposed = cluster.SimpleFillHV(instance.hypervisor, instance.os,
2717 instance.hvparams)
2718 self.hv_new = self.hv_inst = {}
2719
2720 # beparams processing
2721 if self.op.beparams:
2722 i_bedict = GetUpdatedParams(instance.beparams, self.op.beparams,
2723 use_none=True)
2724 objects.UpgradeBeParams(i_bedict)
2725 utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
2726 be_new = cluster.SimpleFillBE(i_bedict)
2727 self.be_proposed = self.be_new = be_new # the new actual values
2728 self.be_inst = i_bedict # the new dict (without defaults)
2729 else:
2730 self.be_new = self.be_inst = {}
2731 self.be_proposed = cluster.SimpleFillBE(instance.beparams)
2732 be_old = cluster.FillBE(instance)
2733
2734 # CPU param validation -- checking every time a parameter is
2735 # changed to cover all cases where either CPU mask or vcpus have
2736 # changed
2737 if (constants.BE_VCPUS in self.be_proposed and
2738 constants.HV_CPU_MASK in self.hv_proposed):
2739 cpu_list = \
2740 utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
2741 # Verify mask is consistent with number of vCPUs. Can skip this
2742 # test if only 1 entry in the CPU mask, which means same mask
2743 # is applied to all vCPUs.
2744 if (len(cpu_list) > 1 and
2745 len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
2746 raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
2747 " CPU mask [%s]" %
2748 (self.be_proposed[constants.BE_VCPUS],
2749 self.hv_proposed[constants.HV_CPU_MASK]),
2750 errors.ECODE_INVAL)
2751
2752 # Only perform this test if a new CPU mask is given
2753 if constants.HV_CPU_MASK in self.hv_new:
2754 # Calculate the largest CPU number requested
2755 max_requested_cpu = max(map(max, cpu_list))
2756 # Check that all of the instance's nodes have enough physical CPUs to
2757 # satisfy the requested CPU mask
2758 _CheckNodesPhysicalCPUs(self, instance.all_nodes,
2759 max_requested_cpu + 1, instance.hypervisor)
2760
2761 # osparams processing
2762 if self.op.osparams:
2763 i_osdict = GetUpdatedParams(instance.osparams, self.op.osparams)
2764 CheckOSParams(self, True, nodelist, instance_os, i_osdict)
2765 self.os_inst = i_osdict # the new dict (without defaults)
2766 else:
2767 self.os_inst = {}
2768
2769 #TODO(dynmem): do the appropriate check involving MINMEM
2770 if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
2771 be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
2772 mem_check_list = [pnode]
2773 if be_new[constants.BE_AUTO_BALANCE]:
2774 # either we changed auto_balance to yes or it was from before
2775 mem_check_list.extend(instance.secondary_nodes)
2776 instance_info = self.rpc.call_instance_info(pnode, instance.name,
2777 instance.hypervisor)
2778 nodeinfo = self.rpc.call_node_info(mem_check_list, None,
2779 [instance.hypervisor], False)
2780 pninfo = nodeinfo[pnode]
2781 msg = pninfo.fail_msg
2782 if msg:
2783 # Assume the primary node is unreachable and go ahead
2784 self.warn.append("Can't get info from primary node %s: %s" %
2785 (pnode, msg))
2786 else:
2787 (_, _, (pnhvinfo, )) = pninfo.payload
2788 if not isinstance(pnhvinfo.get("memory_free", None), int):
2789 self.warn.append("Node data from primary node %s doesn't contain"
2790 " free memory information" % pnode)
2791 elif instance_info.fail_msg:
2792 self.warn.append("Can't get instance runtime information: %s" %
2793 instance_info.fail_msg)
2794 else:
2795 if instance_info.payload:
2796 current_mem = int(instance_info.payload["memory"])
2797 else:
2798 # Assume instance not running
2799 # (there is a slight race condition here, but it's not very
2800 # probable, and we have no other way to check)
2801 # TODO: Describe race condition
2802 current_mem = 0
2803 #TODO(dynmem): do the appropriate check involving MINMEM
2804 miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
2805 pnhvinfo["memory_free"])
2806 if miss_mem > 0:
2807 raise errors.OpPrereqError("This change will prevent the instance"
2808 " from starting, due to %d MB of memory"
2809 " missing on its primary node" %
2810 miss_mem, errors.ECODE_NORES)
2811
2812 if be_new[constants.BE_AUTO_BALANCE]:
2813 for node, nres in nodeinfo.items():
2814 if node not in instance.secondary_nodes:
2815 continue
2816 nres.Raise("Can't get info from secondary node %s" % node,
2817 prereq=True, ecode=errors.ECODE_STATE)
2818 (_, _, (nhvinfo, )) = nres.payload
2819 if not isinstance(nhvinfo.get("memory_free", None), int):
2820 raise errors.OpPrereqError("Secondary node %s didn't return free"
2821 " memory information" % node,
2822 errors.ECODE_STATE)
2823 #TODO(dynmem): do the appropriate check involving MINMEM
2824 elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
2825 raise errors.OpPrereqError("This change will prevent the instance"
2826 " from failover to its secondary node"
2827 " %s, due to not enough memory" % node,
2828 errors.ECODE_STATE)
2829
2830 if self.op.runtime_mem:
2831 remote_info = self.rpc.call_instance_info(instance.primary_node,
2832 instance.name,
2833 instance.hypervisor)
2834 remote_info.Raise("Error checking node %s" % instance.primary_node)
2835 if not remote_info.payload: # not running already
2836 raise errors.OpPrereqError("Instance %s is not running" %
2837 instance.name, errors.ECODE_STATE)
2838
2839 current_memory = remote_info.payload["memory"]
2840 if (not self.op.force and
2841 (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
2842 self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
2843 raise errors.OpPrereqError("Instance %s must have memory between %d"
2844 " and %d MB of memory unless --force is"
2845 " given" %
2846 (instance.name,
2847 self.be_proposed[constants.BE_MINMEM],
2848 self.be_proposed[constants.BE_MAXMEM]),
2849 errors.ECODE_INVAL)
2850
2851 delta = self.op.runtime_mem - current_memory
2852 if delta > 0:
2853 CheckNodeFreeMemory(self, instance.primary_node,
2854 "ballooning memory for instance %s" %
2855 instance.name, delta, instance.hypervisor)
2856
2857 if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
2858 raise errors.OpPrereqError("Disk operations not supported for"
2859 " diskless instances", errors.ECODE_INVAL)
2860
2861 def _PrepareNicCreate(_, params, private):
2862 self._PrepareNicModification(params, private, None, None,
2863 {}, cluster, pnode)
2864 return (None, None)
2865
2866 def _PrepareNicMod(_, nic, params, private):
2867 self._PrepareNicModification(params, private, nic.ip, nic.network,
2868 nic.nicparams, cluster, pnode)
2869 return None
2870
2871 def _PrepareNicRemove(_, params, __):
2872 ip = params.ip
2873 net = params.network
2874 if net is not None and ip is not None:
2875 self.cfg.ReleaseIp(net, ip, self.proc.GetECId())
2876
2877 # Verify NIC changes (operating on copy)
2878 nics = instance.nics[:]
2879 _ApplyContainerMods("NIC", nics, None, self.nicmod,
2880 _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
2881 if len(nics) > constants.MAX_NICS:
2882 raise errors.OpPrereqError("Instance has too many network interfaces"
2883 " (%d), cannot add more" % constants.MAX_NICS,
2884 errors.ECODE_STATE)
2885
2886 def _PrepareDiskMod(_, disk, params, __):
2887 disk.name = params.get(constants.IDISK_NAME, None)
2888
2889 # Verify disk changes (operating on a copy)
2890 disks = copy.deepcopy(instance.disks)
2891 _ApplyContainerMods("disk", disks, None, self.diskmod, None,
2892 _PrepareDiskMod, None)
2893 utils.ValidateDeviceNames("disk", disks)
2894 if len(disks) > constants.MAX_DISKS:
2895 raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
2896 " more" % constants.MAX_DISKS,
2897 errors.ECODE_STATE)
2898 disk_sizes = [disk.size for disk in instance.disks]
2899 disk_sizes.extend(params["size"] for (op, idx, params, private) in
2900 self.diskmod if op == constants.DDM_ADD)
2901 ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
2902 ispec[constants.ISPEC_DISK_SIZE] = disk_sizes
2903
2904 if self.op.offline is not None and self.op.offline:
2905 CheckInstanceState(self, instance, CAN_CHANGE_INSTANCE_OFFLINE,
2906 msg="can't change to offline")
2907
2908 # Pre-compute NIC changes (necessary to use result in hooks)
2909 self._nic_chgdesc = []
2910 if self.nicmod:
2911 # Operate on copies as this is still in prereq
2912 nics = [nic.Copy() for nic in instance.nics]
2913 _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
2914 self._CreateNewNic, self._ApplyNicMods, None)
2915 # Verify that NIC names are unique and valid
2916 utils.ValidateDeviceNames("NIC", nics)
2917 self._new_nics = nics
2918 ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
2919 else:
2920 self._new_nics = None
2921 ispec[constants.ISPEC_NIC_COUNT] = len(instance.nics)
2922
2923 if not self.op.ignore_ipolicy:
2924 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
2925 group_info)
2926
2927 # Fill ispec with backend parameters
2928 ispec[constants.ISPEC_SPINDLE_USE] = \
2929 self.be_new.get(constants.BE_SPINDLE_USE, None)
2930 ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
2931 None)
2932
2933 # Copy ispec to verify parameters with min/max values separately
2934 if self.op.disk_template:
2935 new_disk_template = self.op.disk_template
2936 else:
2937 new_disk_template = instance.disk_template
2938 ispec_max = ispec.copy()
2939 ispec_max[constants.ISPEC_MEM_SIZE] = \
2940 self.be_new.get(constants.BE_MAXMEM, None)
2941 res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
2942 new_disk_template)
2943 ispec_min = ispec.copy()
2944 ispec_min[constants.ISPEC_MEM_SIZE] = \
2945 self.be_new.get(constants.BE_MINMEM, None)
2946 res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
2947 new_disk_template)
2948
2949 if (res_max or res_min):
2950 # FIXME: Improve error message by including information about whether
2951 # the upper or lower limit of the parameter fails the ipolicy.
2952 msg = ("Instance allocation to group %s (%s) violates policy: %s" %
2953 (group_info, group_info.name,
2954 utils.CommaJoin(set(res_max + res_min))))
2955 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
2956
2957 def _ConvertPlainToDrbd(self, feedback_fn):
2958 """Converts an instance from plain to drbd.
2959
2960 """
2961 feedback_fn("Converting template to drbd")
2962 instance = self.instance
2963 pnode = instance.primary_node
2964 snode = self.op.remote_node
2965
2966 assert instance.disk_template == constants.DT_PLAIN
2967
2968 # create a fake disk info for _GenerateDiskTemplate
2969 disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
2970 constants.IDISK_VG: d.logical_id[0],
2971 constants.IDISK_NAME: d.name}
2972 for d in instance.disks]
2973 new_disks = GenerateDiskTemplate(self, self.op.disk_template,
2974 instance.name, pnode, [snode],
2975 disk_info, None, None, 0, feedback_fn,
2976 self.diskparams)
2977 anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
2978 self.diskparams)
2979 p_excl_stor = IsExclusiveStorageEnabledNodeName(self.cfg, pnode)
2980 s_excl_stor = IsExclusiveStorageEnabledNodeName(self.cfg, snode)
2981 info = GetInstanceInfoText(instance)
2982 feedback_fn("Creating additional volumes...")
2983 # first, create the missing data and meta devices
2984 for disk in anno_disks:
2985 # unfortunately this is... not too nice
2986 CreateSingleBlockDev(self, pnode, instance, disk.children[1],
2987 info, True, p_excl_stor)
2988 for child in disk.children:
2989 CreateSingleBlockDev(self, snode, instance, child, info, True,
2990 s_excl_stor)
2991 # at this stage, all new LVs have been created, we can rename the
2992 # old ones
2993 feedback_fn("Renaming original volumes...")
2994 rename_list = [(o, n.children[0].logical_id)
2995 for (o, n) in zip(instance.disks, new_disks)]
2996 result = self.rpc.call_blockdev_rename(pnode, rename_list)
2997 result.Raise("Failed to rename original LVs")
2998
2999 feedback_fn("Initializing DRBD devices...")
3000 # all child devices are in place, we can now create the DRBD devices
3001 try:
3002 for disk in anno_disks:
3003 for (node, excl_stor) in [(pnode, p_excl_stor), (snode, s_excl_stor)]:
3004 f_create = node == pnode
3005 CreateSingleBlockDev(self, node, instance, disk, info, f_create,
3006 excl_stor)
3007 except errors.GenericError, e:
3008 feedback_fn("Initializing of DRBD devices failed;"
3009 " renaming back original volumes...")
3010 for disk in new_disks:
3011 self.cfg.SetDiskID(disk, pnode)
3012 rename_back_list = [(n.children[0], o.logical_id)
3013 for (n, o) in zip(new_disks, instance.disks)]
3014 result = self.rpc.call_blockdev_rename(pnode, rename_back_list)
3015 result.Raise("Failed to rename LVs back after error %s" % str(e))
3016 raise
3017
3018 # at this point, the instance has been modified
3019 instance.disk_template = constants.DT_DRBD8
3020 instance.disks = new_disks
3021 self.cfg.Update(instance, feedback_fn)
3022
3023 # Release node locks while waiting for sync
3024 ReleaseLocks(self, locking.LEVEL_NODE)
3025
3026 # disks are created, waiting for sync
3027 disk_abort = not WaitForSync(self, instance,
3028 oneshot=not self.op.wait_for_sync)
3029 if disk_abort:
3030 raise errors.OpExecError("There are some degraded disks for"
3031 " this instance, please cleanup manually")
3032
3033 # Node resource locks will be released by caller
3034
3035 def _ConvertDrbdToPlain(self, feedback_fn):
3036 """Converts an instance from drbd to plain.
3037
3038 """
3039 instance = self.instance
3040
3041 assert len(instance.secondary_nodes) == 1
3042 assert instance.disk_template == constants.DT_DRBD8
3043
3044 pnode = instance.primary_node
3045 snode = instance.secondary_nodes[0]
3046 feedback_fn("Converting template to plain")
3047
3048 old_disks = AnnotateDiskParams(instance, instance.disks, self.cfg)
3049 new_disks = [d.children[0] for d in instance.disks]
3050
3051 # copy over size, mode and name
3052 for parent, child in zip(old_disks, new_disks):
3053 child.size = parent.size
3054 child.mode = parent.mode
3055 child.name = parent.name
3056
3057 # this is a DRBD disk, return its port to the pool
3058 # NOTE: this must be done right before the call to cfg.Update!
3059 for disk in old_disks:
3060 tcp_port = disk.logical_id[2]
3061 self.cfg.AddTcpUdpPort(tcp_port)
3062
3063 # update instance structure
3064 instance.disks = new_disks
3065 instance.disk_template = constants.DT_PLAIN
3066 _UpdateIvNames(0, instance.disks)
3067 self.cfg.Update(instance, feedback_fn)
3068
3069 # Release locks in case removing disks takes a while
3070 ReleaseLocks(self, locking.LEVEL_NODE)
3071
3072 feedback_fn("Removing volumes on the secondary node...")
3073 for disk in old_disks:
3074 self.cfg.SetDiskID(disk, snode)
3075 msg = self.rpc.call_blockdev_remove(snode, disk).fail_msg
3076 if msg:
3077 self.LogWarning("Could not remove block device %s on node %s,"
3078 " continuing anyway: %s", disk.iv_name, snode, msg)
3079
3080 feedback_fn("Removing unneeded volumes on the primary node...")
3081 for idx, disk in enumerate(old_disks):
3082 meta = disk.children[1]
3083 self.cfg.SetDiskID(meta, pnode)
3084 msg = self.rpc.call_blockdev_remove(pnode, meta).fail_msg
3085 if msg:
3086 self.LogWarning("Could not remove metadata for disk %d on node %s,"
3087 " continuing anyway: %s", idx, pnode, msg)
3088
3089 def _CreateNewDisk(self, idx, params, _):
3090 """Creates a new disk.
3091
3092 """
3093 instance = self.instance
3094
3095 # add a new disk
3096 if instance.disk_template in constants.DTS_FILEBASED:
3097 (file_driver, file_path) = instance.disks[0].logical_id
3098 file_path = os.path.dirname(file_path)
3099 else:
3100 file_driver = file_path = None
3101
3102 disk = \
3103 GenerateDiskTemplate(self, instance.disk_template, instance.name,
3104 instance.primary_node, instance.secondary_nodes,
3105 [params], file_path, file_driver, idx,
3106 self.Log, self.diskparams)[0]
3107
3108 new_disks = CreateDisks(self, instance, disks=[disk])
3109
3110 if self.cluster.prealloc_wipe_disks:
3111 # Wipe new disk
3112 WipeOrCleanupDisks(self, instance,
3113 disks=[(idx, disk, 0)],
3114 cleanup=new_disks)
3115
3116 return (disk, [
3117 ("disk/%d" % idx, "add:size=%s,mode=%s" % (disk.size, disk.mode)),
3118 ])
3119
3120 @staticmethod
3121 def _ModifyDisk(idx, disk, params, _):
3122 """Modifies a disk.
3123
3124 """
3125 changes = []
3126 mode = params.get(constants.IDISK_MODE, None)
3127 if mode:
3128 disk.mode = mode
3129 changes.append(("disk.mode/%d" % idx, disk.mode))
3130
3131 name = params.get(constants.IDISK_NAME, None)
3132 disk.name = name
3133 changes.append(("disk.name/%d" % idx, disk.name))
3134
3135 return changes
3136
3137 def _RemoveDisk(self, idx, root, _):
3138 """Removes a disk.
3139
3140 """
3141 (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
3142 for node, disk in anno_disk.ComputeNodeTree(self.instance.primary_node):
3143 self.cfg.SetDiskID(disk, node)
3144 msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
3145 if msg:
3146 self.LogWarning("Could not remove disk/%d on node '%s': %s,"
3147 " continuing anyway", idx, node, msg)
3148
3149 # if this is a DRBD disk, return its port to the pool
3150 if root.dev_type in constants.LDS_DRBD:
3151 self.cfg.AddTcpUdpPort(root.logical_id[2])
3152
3153 def _CreateNewNic(self, idx, params, private):
3154 """Creates data structure for a new network interface.
3155
3156 """
3157 mac = params[constants.INIC_MAC]
3158 ip = params.get(constants.INIC_IP, None)
3159 net = params.get(constants.INIC_NETWORK, None)
3160 name = params.get(constants.INIC_NAME, None)
3161 net_uuid = self.cfg.LookupNetwork(net)
3162 # TODO: should an empty private.filled be handled here? Can a NIC have no nicparams?
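# private.filled holds the NIC parameters with the cluster defaults applied,
# prepared earlier during the prerequisite checks.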
3163 nicparams = private.filled
3164 nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
3165 nicparams=nicparams)
3166 nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
3167
3168 return (nobj, [
3169 ("nic.%d" % idx,
3170 "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
3171 (mac, ip, private.filled[constants.NIC_MODE],
3172 private.filled[constants.NIC_LINK],
3173 net)),
3174 ])
3175
3176 def _ApplyNicMods(self, idx, nic, params, private):
3177 """Modifies a network interface.
3178
3179 """
3180 changes = []
3181
3182 for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
3183 if key in params:
3184 changes.append(("nic.%s/%d" % (key, idx), params[key]))
3185 setattr(nic, key, params[key])
3186
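# NICs store the network's UUID rather than its name; look up the UUID and
# record a change only if the NIC actually moves to a different network.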
3187 new_net = params.get(constants.INIC_NETWORK, nic.network)
3188 new_net_uuid = self.cfg.LookupNetwork(new_net)
3189 if new_net_uuid != nic.network:
3190 changes.append(("nic.network/%d" % idx, new_net))
3191 nic.network = new_net_uuid
3192
3193 if private.filled:
3194 nic.nicparams = private.filled
3195
3196 for (key, val) in nic.nicparams.items():
3197 changes.append(("nic.%s/%d" % (key, idx), val))
3198
3199 return changes
3200
3201 def Exec(self, feedback_fn):
3202 """Modifies an instance.
3203
3204 Most parameters take effect only at the next restart of the instance.
3205
3206 """
3207 # Process the warnings from CheckPrereq here, as no feedback_fn is
3208 # available there.
3209 # TODO: Replace with self.LogWarning
3210 for warn in self.warn:
3211 feedback_fn("WARNING: %s" % warn)
3212
3213 assert ((self.op.disk_template is None) ^
3214 bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
3215 "Not owning any node resource locks"
3216
3217 result = []
3218 instance = self.instance
3219
3220 # New primary node
3221 if self.op.pnode:
3222 instance.primary_node = self.op.pnode
3223
3224 # runtime memory
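# Unlike most other parameters, runtime memory changes are applied immediately
# to the running instance, via a memory-ballooning RPC to the primary node.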
3225 if self.op.runtime_mem:
3226 rpcres = self.rpc.call_instance_balloon_memory(instance.primary_node,
3227 instance,
3228 self.op.runtime_mem)
3229 rpcres.Raise("Cannot modify instance runtime memory")
3230 result.append(("runtime_memory", self.op.runtime_mem))
3231
3232 # Apply disk changes
3233 _ApplyContainerMods("disk", instance.disks, result, self.diskmod,
3234 self._CreateNewDisk, self._ModifyDisk,
3235 self._RemoveDisk)
3236 _UpdateIvNames(0, instance.disks)
3237
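# A disk template conversion requires the instance's disks to be shut down
# first; the conversion itself is dispatched through _DISK_CONVERSIONS below.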
3238 if self.op.disk_template:
3239 if __debug__:
3240 check_nodes = set(instance.all_nodes)
3241 if self.op.remote_node:
3242 check_nodes.add(self.op.remote_node)
3243 for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
3244 owned = self.owned_locks(level)
3245 assert not (check_nodes - owned), \
3246 ("Not owning the correct locks, owning %r, expected at least %r" %
3247 (owned, check_nodes))
3248
3249 r_shut = ShutdownInstanceDisks(self, instance)
3250 if not r_shut:
3251 raise errors.OpExecError("Cannot shutdown instance disks, unable to"
3252 " proceed with disk template conversion")
3253 mode = (instance.disk_template, self.op.disk_template)
3254 try:
3255 self._DISK_CONVERSIONS[mode](self, feedback_fn)
3256 except:
3257 self.cfg.ReleaseDRBDMinors(instance.name)
3258 raise
3259 result.append(("disk_template", self.op.disk_template))
3260
3261 assert instance.disk_template == self.op.disk_template, \
3262 ("Expected disk template '%s', found '%s'" %
3263 (self.op.disk_template, instance.disk_template))
3264
3265 # Release node and resource locks if there are any (they might already have
3266 # been released during disk conversion)
3267 ReleaseLocks(self, locking.LEVEL_NODE)
3268 ReleaseLocks(self, locking.LEVEL_NODE_RES)
3269
3270 # Apply NIC changes
3271 if self._new_nics is not None:
3272 instance.nics = self._new_nics
3273 result.extend(self._nic_chgdesc)
3274
3275 # hvparams changes
3276 if self.op.hvparams:
3277 instance.hvparams = self.hv_inst
3278 for key, val in self.op.hvparams.iteritems():
3279 result.append(("hv/%s" % key, val))
3280
3281 # beparams changes
3282 if self.op.beparams:
3283 instance.beparams = self.be_inst
3284 for key, val in self.op.beparams.iteritems():
3285 result.append(("be/%s" % key, val))
3286
3287 # OS change
3288 if self.op.os_name:
3289 instance.os = self.op.os_name
3290
3291 # osparams changes
3292 if self.op.osparams:
3293 instance.osparams = self.os_inst
3294 for key, val in self.op.osparams.iteritems():
3295 result.append(("os/%s" % key, val))
3296
3297 if self.op.offline is None:
3298 # Ignore
3299 pass
3300 elif self.op.offline:
3301 # Mark instance as offline
3302 self.cfg.MarkInstanceOffline(instance.name)
3303 result.append(("admin_state", constants.ADMINST_OFFLINE))
3304 else:
3305 # Mark instance as online, but stopped
3306 self.cfg.MarkInstanceDown(instance.name)
3307 result.append(("admin_state", constants.ADMINST_DOWN))
3308
3309 self.cfg.Update(instance, feedback_fn, self.proc.GetECId())
3310
3311 assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
3312 self.owned_locks(locking.LEVEL_NODE)), \
3313 "All node locks should have been released by now"
3314
3315 return result
3316
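# Supported disk template conversions, keyed by (current, requested) template.
# As an illustrative example (assuming the standard command line tools), a
# plain -> drbd conversion would be requested with something like
# "gnt-instance modify -t drbd -n <new_secondary> <instance>" and would be
# handled here by _ConvertPlainToDrbd.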
3317 _DISK_CONVERSIONS = {
3318 (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
3319 (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
3320 }
3321
3322
3323 class LUInstanceChangeGroup(LogicalUnit):
3324 HPATH = "instance-change-group"
3325 HTYPE = constants.HTYPE_INSTANCE
3326 REQ_BGL = False
3327
3328 def ExpandNames(self):
3329 self.share_locks = ShareAll()
3330
3331 self.needed_locks = {
3332 locking.LEVEL_NODEGROUP: [],
3333 locking.LEVEL_NODE: [],
3334 locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
3335 }
3336
3337 self._ExpandAndLockInstance()
3338
3339 if self.op.target_groups:
3340 self.req_target_uuids = map(self.cfg.LookupNodeGroup,
3341 self.op.target_groups)
3342 else:
3343 self.req_target_uuids = None
3344
3345 self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)
3346
3347 def DeclareLocks(self, level):
3348 if level == locking.LEVEL_NODEGROUP:
3349 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
3350
3351 if self.req_target_uuids:
3352 lock_groups = set(self.req_target_uuids)
3353
3354 # Optimistically lock all groups used by the instance; this reads node
3355 # information before the nodes are locked, so it must be verified later on
3356 instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_name)
3357 lock_groups.update(instance_groups)
3358 else:
3359 # No target groups requested, so all node groups need to be locked
3360 lock_groups = locking.ALL_SET
3361
3362 self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups
3363
3364 elif level == locking.LEVEL_NODE:
3365 if self.req_target_uuids:
3366 # Lock all nodes used by the instance
3367 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
3368 self._LockInstancesNodes()
3369
3370 # Lock all nodes in all potential target groups
3371 lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
3372 self.cfg.GetInstanceNodeGroups(self.op.instance_name))
3373 member_nodes = [node_name
3374 for group in lock_groups
3375 for node_name in self.cfg.GetNodeGroup(group).members]
3376 self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
3377 else:
3378 # Lock all nodes as all groups are potential targets
3379 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3380
3381 def CheckPrereq(self):
3382 owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
3383 owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
3384 owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
3385
3386 assert (self.req_target_uuids is None or
3387 owned_groups.issuperset(self.req_target_uuids))
3388 assert owned_instances == set([self.op.instance_name])
3389
3390 # Get instance information
3391 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3392
3393 # Check if node groups for locked instance are still correct
3394 assert owned_nodes.issuperset(self.instance.all_nodes), \
3395 ("Instance %s's nodes changed while we kept the lock" %
3396 self.op.instance_name)
3397
3398 inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_name,
3399 owned_groups)
3400
3401 if self.req_target_uuids:
3402 # User requested specific target groups
3403 self.target_uuids = frozenset(self.req_target_uuids)
3404 else:
3405 # All groups except those used by the instance are potential targets
3406 self.target_uuids = owned_groups - inst_groups
3407
3408 conflicting_groups = self.target_uuids & inst_groups
3409 if conflicting_groups:
3410 raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
3411 " used by the instance '%s'" %
3412 (utils.CommaJoin(conflicting_groups),
3413 self.op.instance_name),
3414 errors.ECODE_INVAL)
3415
3416 if not self.target_uuids:
3417 raise errors.OpPrereqError("There are no possible target groups",
3418 errors.ECODE_INVAL)
3419
3420 def BuildHooksEnv(self):
3421 """Build hooks env.
3422
3423 """
3