Export device UUIDs to hooks and OS scripts
[ganeti-github.git] / lib/cmdlib/instance.py
1 #
2 #
3
4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
5 #
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
10 #
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
15 #
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 # 02110-1301, USA.
20
21
22 """Logical units dealing with instances."""
23
24 import OpenSSL
25 import copy
26 import logging
27 import os
28
29 from ganeti import compat
30 from ganeti import constants
31 from ganeti import errors
32 from ganeti import ht
33 from ganeti import hypervisor
34 from ganeti import locking
35 from ganeti.masterd import iallocator
36 from ganeti import masterd
37 from ganeti import netutils
38 from ganeti import objects
39 from ganeti import opcodes
40 from ganeti import pathutils
41 from ganeti import rpc
42 from ganeti import utils
43
44 from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs
45
46 from ganeti.cmdlib.common import INSTANCE_DOWN, \
47 INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
48 ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
49 LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
50 IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
51 AnnotateDiskParams, GetUpdatedParams, ExpandInstanceName, \
52 ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeName
53 from ganeti.cmdlib.instance_storage import CreateDisks, \
54 CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
55 IsExclusiveStorageEnabledNodeName, CreateSingleBlockDev, ComputeDisks, \
56 CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
57 StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks
58 from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
59 GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
60 NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
61 ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
62 GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
63 CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS
64
65 import ganeti.masterd.instance
66
67
68 #: Type description for changes as returned by L{_ApplyContainerMods}'s
69 #: callbacks
70 _TApplyContModsCbChanges = \
71 ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
72 ht.TNonEmptyString,
73 ht.TAny,
74 ])))
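# In other words: either None or a list of (description, value) pairs, where
# each description is a non-empty string naming what changed and the value can
# be of any type (e.g. a new parameter value).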
75
76
77 def _CheckHostnameSane(lu, name):
78 """Ensures that a given hostname resolves to a 'sane' name.
79
80 The given name is required to be a prefix of the resolved hostname,
81 to prevent accidental mismatches.
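For example, a given name of "inst1" is accepted when it resolves to
"inst1.example.com", but rejected when it resolves to "mail.example.com".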
82
83 @param lu: the logical unit on behalf of which we're checking
84 @param name: the name we should resolve and check
85 @return: the resolved hostname object
86
87 """
88 hostname = netutils.GetHostname(name=name)
89 if hostname.name != name:
90 lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
91 if not utils.MatchNameComponent(name, [hostname.name]):
92 raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
93 " same as given hostname '%s'") %
94 (hostname.name, name), errors.ECODE_INVAL)
95 return hostname
96
97
98 def _CheckOpportunisticLocking(op):
99 """Generate error if opportunistic locking is not possible.
100
101 """
102 if op.opportunistic_locking and not op.iallocator:
103 raise errors.OpPrereqError("Opportunistic locking is only available in"
104 " combination with an instance allocator",
105 errors.ECODE_INVAL)
106
107
108 def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_whitelist):
109 """Wrapper around IAReqInstanceAlloc.
110
111 @param op: The instance opcode
112 @param disks: The computed disks
113 @param nics: The computed nics
114 @param beparams: The fully filled beparams
115 @param node_whitelist: List of nodes which should appear as online to the
116 allocator (unless the node is already marked offline)
117
118 @returns: A filled L{iallocator.IAReqInstanceAlloc}
119
120 """
121 spindle_use = beparams[constants.BE_SPINDLE_USE]
122 return iallocator.IAReqInstanceAlloc(name=op.instance_name,
123 disk_template=op.disk_template,
124 tags=op.tags,
125 os=op.os_type,
126 vcpus=beparams[constants.BE_VCPUS],
127 memory=beparams[constants.BE_MAXMEM],
128 spindle_use=spindle_use,
129 disks=disks,
130 nics=[n.ToDict() for n in nics],
131 hypervisor=op.hypervisor,
132 node_whitelist=node_whitelist)
133
134
135 def _ComputeFullBeParams(op, cluster):
136 """Computes the full beparams.
137
138 @param op: The instance opcode
139 @param cluster: The cluster config object
140
141 @return: The fully filled beparams
142
143 """
144 default_beparams = cluster.beparams[constants.PP_DEFAULT]
145 for param, value in op.beparams.iteritems():
146 if value == constants.VALUE_AUTO:
147 op.beparams[param] = default_beparams[param]
148 objects.UpgradeBeParams(op.beparams)
149 utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
150 return cluster.SimpleFillBE(op.beparams)
151
152
153 def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
154 """Computes the nics.
155
156 @param op: The instance opcode
157 @param cluster: Cluster configuration object
158 @param default_ip: The default ip to assign
159 @param cfg: An instance of the configuration object
160 @param ec_id: Execution context ID
161
162 @returns: The list of built NIC objects
163
164 """
165 nics = []
166 for nic in op.nics:
167 nic_mode_req = nic.get(constants.INIC_MODE, None)
168 nic_mode = nic_mode_req
169 if nic_mode is None or nic_mode == constants.VALUE_AUTO:
170 nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
171
172 net = nic.get(constants.INIC_NETWORK, None)
173 link = nic.get(constants.NIC_LINK, None)
174 ip = nic.get(constants.INIC_IP, None)
175
176 if net is None or net.lower() == constants.VALUE_NONE:
177 net = None
178 else:
179 if nic_mode_req is not None or link is not None:
180 raise errors.OpPrereqError("If network is given, no mode or link"
181 " is allowed to be passed",
182 errors.ECODE_INVAL)
183
184 # ip validity checks
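# accepted values for the IP: "none" (no address), "auto" (use the IP the
# instance name resolved to), "pool" (allocate one from the NIC's network),
# or a literal IP address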
185 if ip is None or ip.lower() == constants.VALUE_NONE:
186 nic_ip = None
187 elif ip.lower() == constants.VALUE_AUTO:
188 if not op.name_check:
189 raise errors.OpPrereqError("IP address set to auto but name checks"
190 " have been skipped",
191 errors.ECODE_INVAL)
192 nic_ip = default_ip
193 else:
194 # We defer pool operations until later, so that the iallocator has
195 # filled in the instance's node(s)
196 if ip.lower() == constants.NIC_IP_POOL:
197 if net is None:
198 raise errors.OpPrereqError("if ip=pool, parameter network"
199 " must be passed too",
200 errors.ECODE_INVAL)
201
202 elif not netutils.IPAddress.IsValid(ip):
203 raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
204 errors.ECODE_INVAL)
205
206 nic_ip = ip
207
208 # TODO: check the ip address for uniqueness
209 if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
210 raise errors.OpPrereqError("Routed nic mode requires an ip address",
211 errors.ECODE_INVAL)
212
213 # MAC address verification
214 mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
215 if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
216 mac = utils.NormalizeAndValidateMac(mac)
217
218 try:
219 # TODO: We need to factor this out
220 cfg.ReserveMAC(mac, ec_id)
221 except errors.ReservationError:
222 raise errors.OpPrereqError("MAC address %s already in use"
223 " in cluster" % mac,
224 errors.ECODE_NOTUNIQUE)
225
226 # Build nic parameters
227 nicparams = {}
228 if nic_mode_req:
229 nicparams[constants.NIC_MODE] = nic_mode
230 if link:
231 nicparams[constants.NIC_LINK] = link
232
233 check_params = cluster.SimpleFillNIC(nicparams)
234 objects.NIC.CheckParameterSyntax(check_params)
235 net_uuid = cfg.LookupNetwork(net)
236 name = nic.get(constants.INIC_NAME, None)
237 if name is not None and name.lower() == constants.VALUE_NONE:
238 name = None
239 nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
240 network=net_uuid, nicparams=nicparams)
241 nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
242 nics.append(nic_obj)
243
244 return nics
245
246
247 def _CheckForConflictingIp(lu, ip, node):
248 """In case of conflicting IP address raise error.
249
250 @type ip: string
251 @param ip: IP address
252 @type node: string
253 @param node: node name
254
255 """
256 (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node)
257 if conf_net is not None:
258 raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
259 " network %s, but the target NIC does not." %
260 (ip, conf_net)),
261 errors.ECODE_STATE)
262
263 return (None, None)
264
265
266 def _ComputeIPolicyInstanceSpecViolation(
267 ipolicy, instance_spec, disk_template,
268 _compute_fn=ComputeIPolicySpecViolation):
269 """Compute if instance specs meets the specs of ipolicy.
270
271 @type ipolicy: dict
272 @param ipolicy: The ipolicy to verify against
273 @type instance_spec: dict
274 @param instance_spec: The instance spec to verify
275 @type disk_template: string
276 @param disk_template: the disk template of the instance
277 @param _compute_fn: The function to verify ipolicy (unittest only)
278 @see: L{ComputeIPolicySpecViolation}
279
280 """
281 mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
282 cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
283 disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
284 disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
285 nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
286 spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)
287
288 return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
289 disk_sizes, spindle_use, disk_template)
290
291
292 def _CheckOSVariant(os_obj, name):
293 """Check whether an OS name conforms to the os variants specification.
294
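A name including a variant has the form "<os>+<variant>", e.g.
"debootstrap+default"; the variant part is extracted with
L{objects.OS.GetVariant} below.
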
295 @type os_obj: L{objects.OS}
296 @param os_obj: OS object to check
297 @type name: string
298 @param name: OS name passed by the user, to check for validity
299
300 """
301 variant = objects.OS.GetVariant(name)
302 if not os_obj.supported_variants:
303 if variant:
304 raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
305 " passed)" % (os_obj.name, variant),
306 errors.ECODE_INVAL)
307 return
308 if not variant:
309 raise errors.OpPrereqError("OS name must include a variant",
310 errors.ECODE_INVAL)
311
312 if variant not in os_obj.supported_variants:
313 raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
314
315
316 class LUInstanceCreate(LogicalUnit):
317 """Create an instance.
318
319 """
320 HPATH = "instance-add"
321 HTYPE = constants.HTYPE_INSTANCE
322 REQ_BGL = False
323
324 def CheckArguments(self):
325 """Check arguments.
326
327 """
328 # do not require name_check to ease forward/backward compatibility
329 # for tools
330 if self.op.no_install and self.op.start:
331 self.LogInfo("No-installation mode selected, disabling startup")
332 self.op.start = False
333 # validate/normalize the instance name
334 self.op.instance_name = \
335 netutils.Hostname.GetNormalizedName(self.op.instance_name)
336
337 if self.op.ip_check and not self.op.name_check:
338 # TODO: make the ip check more flexible and not depend on the name check
339 raise errors.OpPrereqError("Cannot do IP address check without a name"
340 " check", errors.ECODE_INVAL)
341
342 # check nics' parameter names
343 for nic in self.op.nics:
344 utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
345 # check that NIC's parameters names are unique and valid
346 utils.ValidateDeviceNames("NIC", self.op.nics)
347
348 # check that disk's names are unique and valid
349 utils.ValidateDeviceNames("disk", self.op.disks)
350
351 cluster = self.cfg.GetClusterInfo()
352 if not self.op.disk_template in cluster.enabled_disk_templates:
353 raise errors.OpPrereqError("Cannot create an instance with disk template"
354 " '%s', because it is not enabled in the"
355 " cluster. Enabled disk templates are: %s." %
356 (self.op.disk_template,
357 ",".join(cluster.enabled_disk_templates)))
358
359 # check disks. parameter names and consistent adopt/no-adopt strategy
360 has_adopt = has_no_adopt = False
361 for disk in self.op.disks:
362 if self.op.disk_template != constants.DT_EXT:
363 utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
364 if constants.IDISK_ADOPT in disk:
365 has_adopt = True
366 else:
367 has_no_adopt = True
368 if has_adopt and has_no_adopt:
369 raise errors.OpPrereqError("Either all disks are adopted or none is",
370 errors.ECODE_INVAL)
371 if has_adopt:
372 if self.op.disk_template not in constants.DTS_MAY_ADOPT:
373 raise errors.OpPrereqError("Disk adoption is not supported for the"
374 " '%s' disk template" %
375 self.op.disk_template,
376 errors.ECODE_INVAL)
377 if self.op.iallocator is not None:
378 raise errors.OpPrereqError("Disk adoption not allowed with an"
379 " iallocator script", errors.ECODE_INVAL)
380 if self.op.mode == constants.INSTANCE_IMPORT:
381 raise errors.OpPrereqError("Disk adoption not allowed for"
382 " instance import", errors.ECODE_INVAL)
383 else:
384 if self.op.disk_template in constants.DTS_MUST_ADOPT:
385 raise errors.OpPrereqError("Disk template %s requires disk adoption,"
386 " but no 'adopt' parameter given" %
387 self.op.disk_template,
388 errors.ECODE_INVAL)
389
390 self.adopt_disks = has_adopt
391
392 # instance name verification
393 if self.op.name_check:
394 self.hostname1 = _CheckHostnameSane(self, self.op.instance_name)
395 self.op.instance_name = self.hostname1.name
396 # used in CheckPrereq for ip ping check
397 self.check_ip = self.hostname1.ip
398 else:
399 self.check_ip = None
400
401 # file storage checks
402 if (self.op.file_driver and
403 not self.op.file_driver in constants.FILE_DRIVER):
404 raise errors.OpPrereqError("Invalid file driver name '%s'" %
405 self.op.file_driver, errors.ECODE_INVAL)
406
407 if self.op.disk_template == constants.DT_FILE:
408 opcodes.RequireFileStorage()
409 elif self.op.disk_template == constants.DT_SHARED_FILE:
410 opcodes.RequireSharedFileStorage()
411
412 ### Node/iallocator related checks
413 CheckIAllocatorOrNode(self, "iallocator", "pnode")
414
415 if self.op.pnode is not None:
416 if self.op.disk_template in constants.DTS_INT_MIRROR:
417 if self.op.snode is None:
418 raise errors.OpPrereqError("The networked disk templates need"
419 " a mirror node", errors.ECODE_INVAL)
420 elif self.op.snode:
421 self.LogWarning("Secondary node will be ignored on non-mirrored disk"
422 " template")
423 self.op.snode = None
424
425 _CheckOpportunisticLocking(self.op)
426
427 self._cds = GetClusterDomainSecret()
428
429 if self.op.mode == constants.INSTANCE_IMPORT:
430 # On import force_variant must be True, because if we forced it at
431 # initial install, our only chance when importing it back is that it
432 # works again!
433 self.op.force_variant = True
434
435 if self.op.no_install:
436 self.LogInfo("No-installation mode has no effect during import")
437
438 elif self.op.mode == constants.INSTANCE_CREATE:
439 if self.op.os_type is None:
440 raise errors.OpPrereqError("No guest OS specified",
441 errors.ECODE_INVAL)
442 if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
443 raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
444 " installation" % self.op.os_type,
445 errors.ECODE_STATE)
446 if self.op.disk_template is None:
447 raise errors.OpPrereqError("No disk template specified",
448 errors.ECODE_INVAL)
449
450 elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
451 # Check handshake to ensure both clusters have the same domain secret
452 src_handshake = self.op.source_handshake
453 if not src_handshake:
454 raise errors.OpPrereqError("Missing source handshake",
455 errors.ECODE_INVAL)
456
457 errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
458 src_handshake)
459 if errmsg:
460 raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
461 errors.ECODE_INVAL)
462
463 # Load and check source CA
464 self.source_x509_ca_pem = self.op.source_x509_ca
465 if not self.source_x509_ca_pem:
466 raise errors.OpPrereqError("Missing source X509 CA",
467 errors.ECODE_INVAL)
468
469 try:
470 (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
471 self._cds)
472 except OpenSSL.crypto.Error, err:
473 raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
474 (err, ), errors.ECODE_INVAL)
475
476 (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
477 if errcode is not None:
478 raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
479 errors.ECODE_INVAL)
480
481 self.source_x509_ca = cert
482
483 src_instance_name = self.op.source_instance_name
484 if not src_instance_name:
485 raise errors.OpPrereqError("Missing source instance name",
486 errors.ECODE_INVAL)
487
488 self.source_instance_name = \
489 netutils.GetHostname(name=src_instance_name).name
490
491 else:
492 raise errors.OpPrereqError("Invalid instance creation mode %r" %
493 self.op.mode, errors.ECODE_INVAL)
494
495 def ExpandNames(self):
496 """ExpandNames for CreateInstance.
497
498 Figure out the right locks for instance creation.
499
500 """
501 self.needed_locks = {}
502
503 instance_name = self.op.instance_name
504 # this is just a preventive check, but someone might still add this
505 # instance in the meantime, and creation will fail at lock-add time
506 if instance_name in self.cfg.GetInstanceList():
507 raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
508 instance_name, errors.ECODE_EXISTS)
509
510 self.add_locks[locking.LEVEL_INSTANCE] = instance_name
511
512 if self.op.iallocator:
513 # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
514 # specifying a group on instance creation and then selecting nodes from
515 # that group
516 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
517 self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
518
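# with opportunistic locking the node locks are acquired on a best-effort
# basis; _RunAllocator later restricts the allocation to the nodes whose
# locks were actually obtained, and a failure only raises a temporary error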
519 if self.op.opportunistic_locking:
520 self.opportunistic_locks[locking.LEVEL_NODE] = True
521 self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
522 else:
523 self.op.pnode = ExpandNodeName(self.cfg, self.op.pnode)
524 nodelist = [self.op.pnode]
525 if self.op.snode is not None:
526 self.op.snode = ExpandNodeName(self.cfg, self.op.snode)
527 nodelist.append(self.op.snode)
528 self.needed_locks[locking.LEVEL_NODE] = nodelist
529
530 # in case of import lock the source node too
531 if self.op.mode == constants.INSTANCE_IMPORT:
532 src_node = self.op.src_node
533 src_path = self.op.src_path
534
535 if src_path is None:
536 self.op.src_path = src_path = self.op.instance_name
537
538 if src_node is None:
539 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
540 self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
541 self.op.src_node = None
542 if os.path.isabs(src_path):
543 raise errors.OpPrereqError("Importing an instance from a path"
544 " requires a source node option",
545 errors.ECODE_INVAL)
546 else:
547 self.op.src_node = src_node = ExpandNodeName(self.cfg, src_node)
548 if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
549 self.needed_locks[locking.LEVEL_NODE].append(src_node)
550 if not os.path.isabs(src_path):
551 self.op.src_path = src_path = \
552 utils.PathJoin(pathutils.EXPORT_DIR, src_path)
553
554 self.needed_locks[locking.LEVEL_NODE_RES] = \
555 CopyLockList(self.needed_locks[locking.LEVEL_NODE])
556
557 def _RunAllocator(self):
558 """Run the allocator based on input opcode.
559
560 """
561 if self.op.opportunistic_locking:
562 # Only consider nodes for which a lock is held
563 node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
564 else:
565 node_whitelist = None
566
567 #TODO Export network to iallocator so that it chooses a pnode
568 # in a nodegroup that has the desired network connected to it
569 req = _CreateInstanceAllocRequest(self.op, self.disks,
570 self.nics, self.be_full,
571 node_whitelist)
572 ial = iallocator.IAllocator(self.cfg, self.rpc, req)
573
574 ial.Run(self.op.iallocator)
575
576 if not ial.success:
577 # When opportunistic locks are used only a temporary failure is generated
578 if self.op.opportunistic_locking:
579 ecode = errors.ECODE_TEMP_NORES
580 else:
581 ecode = errors.ECODE_NORES
582
583 raise errors.OpPrereqError("Can't compute nodes using"
584 " iallocator '%s': %s" %
585 (self.op.iallocator, ial.info),
586 ecode)
587
588 self.op.pnode = ial.result[0]
589 self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
590 self.op.instance_name, self.op.iallocator,
591 utils.CommaJoin(ial.result))
592
593 assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"
594
595 if req.RequiredNodes() == 2:
596 self.op.snode = ial.result[1]
597
598 def BuildHooksEnv(self):
599 """Build hooks env.
600
601 This runs on master, primary and secondary nodes of the instance.
602
603 """
604 env = {
605 "ADD_MODE": self.op.mode,
606 }
607 if self.op.mode == constants.INSTANCE_IMPORT:
608 env["SRC_NODE"] = self.op.src_node
609 env["SRC_PATH"] = self.op.src_path
610 env["SRC_IMAGES"] = self.src_images
611
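# each disk is exported to the hooks as a (name, uuid, size, mode) tuple, so
# that device UUIDs are visible to hooks and OS scripts; at instance creation
# the uuid may still be empty, hence the d.get("uuid", "") below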
612 env.update(BuildInstanceHookEnv(
613 name=self.op.instance_name,
614 primary_node=self.op.pnode,
615 secondary_nodes=self.secondaries,
616 status=self.op.start,
617 os_type=self.op.os_type,
618 minmem=self.be_full[constants.BE_MINMEM],
619 maxmem=self.be_full[constants.BE_MAXMEM],
620 vcpus=self.be_full[constants.BE_VCPUS],
621 nics=NICListToTuple(self, self.nics),
622 disk_template=self.op.disk_template,
623 disks=[(d[constants.IDISK_NAME], d.get("uuid", ""),
624 d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
625 for d in self.disks],
626 bep=self.be_full,
627 hvp=self.hv_full,
628 hypervisor_name=self.op.hypervisor,
629 tags=self.op.tags,
630 ))
631
632 return env
633
634 def BuildHooksNodes(self):
635 """Build hooks nodes.
636
637 """
638 nl = [self.cfg.GetMasterNode(), self.op.pnode] + self.secondaries
639 return nl, nl
640
641 def _ReadExportInfo(self):
642 """Reads the export information from disk.
643
644 It will override the opcode source node and path with the actual
645 information, if these two were not specified before.
646
647 @return: the export information
648
649 """
650 assert self.op.mode == constants.INSTANCE_IMPORT
651
652 src_node = self.op.src_node
653 src_path = self.op.src_path
654
655 if src_node is None:
656 locked_nodes = self.owned_locks(locking.LEVEL_NODE)
657 exp_list = self.rpc.call_export_list(locked_nodes)
658 found = False
659 for node in exp_list:
660 if exp_list[node].fail_msg:
661 continue
662 if src_path in exp_list[node].payload:
663 found = True
664 self.op.src_node = src_node = node
665 self.op.src_path = src_path = utils.PathJoin(pathutils.EXPORT_DIR,
666 src_path)
667 break
668 if not found:
669 raise errors.OpPrereqError("No export found for relative path %s" %
670 src_path, errors.ECODE_INVAL)
671
672 CheckNodeOnline(self, src_node)
673 result = self.rpc.call_export_info(src_node, src_path)
674 result.Raise("No export or invalid export found in dir %s" % src_path)
675
676 export_info = objects.SerializableConfigParser.Loads(str(result.payload))
677 if not export_info.has_section(constants.INISECT_EXP):
678 raise errors.ProgrammerError("Corrupted export config",
679 errors.ECODE_ENVIRON)
680
681 ei_version = export_info.get(constants.INISECT_EXP, "version")
682 if (int(ei_version) != constants.EXPORT_VERSION):
683 raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
684 (ei_version, constants.EXPORT_VERSION),
685 errors.ECODE_ENVIRON)
686 return export_info
687
688 def _ReadExportParams(self, einfo):
689 """Use export parameters as defaults.
690
691 In case the opcode doesn't specify (i.e. override) some instance
692 parameters, try to use them from the export information, if
693 that declares them.
694
695 """
696 self.op.os_type = einfo.get(constants.INISECT_EXP, "os")
697
698 if self.op.disk_template is None:
699 if einfo.has_option(constants.INISECT_INS, "disk_template"):
700 self.op.disk_template = einfo.get(constants.INISECT_INS,
701 "disk_template")
702 if self.op.disk_template not in constants.DISK_TEMPLATES:
703 raise errors.OpPrereqError("Disk template specified in configuration"
704 " file is not one of the allowed values:"
705 " %s" %
706 " ".join(constants.DISK_TEMPLATES),
707 errors.ECODE_INVAL)
708 else:
709 raise errors.OpPrereqError("No disk template specified and the export"
710 " is missing the disk_template information",
711 errors.ECODE_INVAL)
712
713 if not self.op.disks:
714 disks = []
715 # TODO: import the disk iv_name too
716 for idx in range(constants.MAX_DISKS):
717 if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
718 disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
719 disks.append({constants.IDISK_SIZE: disk_sz})
720 self.op.disks = disks
721 if not disks and self.op.disk_template != constants.DT_DISKLESS:
722 raise errors.OpPrereqError("No disk info specified and the export"
723 " is missing the disk information",
724 errors.ECODE_INVAL)
725
726 if not self.op.nics:
727 nics = []
728 for idx in range(constants.MAX_NICS):
729 if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
730 ndict = {}
731 for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
732 v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
733 ndict[name] = v
734 nics.append(ndict)
735 else:
736 break
737 self.op.nics = nics
738
739 if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
740 self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()
741
742 if (self.op.hypervisor is None and
743 einfo.has_option(constants.INISECT_INS, "hypervisor")):
744 self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")
745
746 if einfo.has_section(constants.INISECT_HYP):
747 # use the export parameters but do not override the ones
748 # specified by the user
749 for name, value in einfo.items(constants.INISECT_HYP):
750 if name not in self.op.hvparams:
751 self.op.hvparams[name] = value
752
753 if einfo.has_section(constants.INISECT_BEP):
754 # use the parameters, without overriding
755 for name, value in einfo.items(constants.INISECT_BEP):
756 if name not in self.op.beparams:
757 self.op.beparams[name] = value
758 # Compatibility for the old "memory" be param
759 if name == constants.BE_MEMORY:
760 if constants.BE_MAXMEM not in self.op.beparams:
761 self.op.beparams[constants.BE_MAXMEM] = value
762 if constants.BE_MINMEM not in self.op.beparams:
763 self.op.beparams[constants.BE_MINMEM] = value
764 else:
765 # try to read the parameters old style, from the main section
766 for name in constants.BES_PARAMETERS:
767 if (name not in self.op.beparams and
768 einfo.has_option(constants.INISECT_INS, name)):
769 self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)
770
771 if einfo.has_section(constants.INISECT_OSP):
772 # use the parameters, without overriding
773 for name, value in einfo.items(constants.INISECT_OSP):
774 if name not in self.op.osparams:
775 self.op.osparams[name] = value
776
777 def _RevertToDefaults(self, cluster):
778 """Revert the instance parameters to the default values.
779
780 """
781 # hvparams
782 hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
783 for name in self.op.hvparams.keys():
784 if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
785 del self.op.hvparams[name]
786 # beparams
787 be_defs = cluster.SimpleFillBE({})
788 for name in self.op.beparams.keys():
789 if name in be_defs and be_defs[name] == self.op.beparams[name]:
790 del self.op.beparams[name]
791 # nic params
792 nic_defs = cluster.SimpleFillNIC({})
793 for nic in self.op.nics:
794 for name in constants.NICS_PARAMETERS:
795 if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
796 del nic[name]
797 # osparams
798 os_defs = cluster.SimpleFillOS(self.op.os_type, {})
799 for name in self.op.osparams.keys():
800 if name in os_defs and os_defs[name] == self.op.osparams[name]:
801 del self.op.osparams[name]
802
803 def _CalculateFileStorageDir(self):
804 """Calculate final instance file storage dir.
805
806 """
807 # file storage dir calculation/check
808 self.instance_file_storage_dir = None
809 if self.op.disk_template in constants.DTS_FILEBASED:
810 # build the full file storage dir path
811 joinargs = []
812
813 if self.op.disk_template == constants.DT_SHARED_FILE:
814 get_fsd_fn = self.cfg.GetSharedFileStorageDir
815 else:
816 get_fsd_fn = self.cfg.GetFileStorageDir
817
818 cfg_storagedir = get_fsd_fn()
819 if not cfg_storagedir:
820 raise errors.OpPrereqError("Cluster file storage dir not defined",
821 errors.ECODE_STATE)
822 joinargs.append(cfg_storagedir)
823
824 if self.op.file_storage_dir is not None:
825 joinargs.append(self.op.file_storage_dir)
826
827 joinargs.append(self.op.instance_name)
828
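# resulting path: <cluster (shared) file storage dir>[/<op.file_storage_dir>]/<instance name>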
829 # pylint: disable=W0142
830 self.instance_file_storage_dir = utils.PathJoin(*joinargs)
831
832 def CheckPrereq(self): # pylint: disable=R0914
833 """Check prerequisites.
834
835 """
836 self._CalculateFileStorageDir()
837
838 if self.op.mode == constants.INSTANCE_IMPORT:
839 export_info = self._ReadExportInfo()
840 self._ReadExportParams(export_info)
841 self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
842 else:
843 self._old_instance_name = None
844
845 if (not self.cfg.GetVGName() and
846 self.op.disk_template not in constants.DTS_NOT_LVM):
847 raise errors.OpPrereqError("Cluster does not support lvm-based"
848 " instances", errors.ECODE_STATE)
849
850 if (self.op.hypervisor is None or
851 self.op.hypervisor == constants.VALUE_AUTO):
852 self.op.hypervisor = self.cfg.GetHypervisorType()
853
854 cluster = self.cfg.GetClusterInfo()
855 enabled_hvs = cluster.enabled_hypervisors
856 if self.op.hypervisor not in enabled_hvs:
857 raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
858 " cluster (%s)" %
859 (self.op.hypervisor, ",".join(enabled_hvs)),
860 errors.ECODE_STATE)
861
862 # Check tag validity
863 for tag in self.op.tags:
864 objects.TaggableObject.ValidateTag(tag)
865
866 # check hypervisor parameter syntax (locally)
867 utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
868 filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
869 self.op.hvparams)
870 hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
871 hv_type.CheckParameterSyntax(filled_hvp)
872 self.hv_full = filled_hvp
873 # check that we don't specify global parameters on an instance
874 CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
875 "instance", "cluster")
876
877 # fill and remember the beparams dict
878 self.be_full = _ComputeFullBeParams(self.op, cluster)
879
880 # build os parameters
881 self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)
882
883 # now that hvp/bep are in final format, let's reset to defaults,
884 # if told to do so
885 if self.op.identify_defaults:
886 self._RevertToDefaults(cluster)
887
888 # NIC buildup
889 self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
890 self.proc.GetECId())
891
892 # disk checks/pre-build
893 default_vg = self.cfg.GetVGName()
894 self.disks = ComputeDisks(self.op, default_vg)
895
896 if self.op.mode == constants.INSTANCE_IMPORT:
897 disk_images = []
898 for idx in range(len(self.disks)):
899 option = "disk%d_dump" % idx
900 if export_info.has_option(constants.INISECT_INS, option):
901 # FIXME: are the old os-es, disk sizes, etc. useful?
902 export_name = export_info.get(constants.INISECT_INS, option)
903 image = utils.PathJoin(self.op.src_path, export_name)
904 disk_images.append(image)
905 else:
906 disk_images.append(False)
907
908 self.src_images = disk_images
909
910 if self.op.instance_name == self._old_instance_name:
911 for idx, nic in enumerate(self.nics):
912 if nic.mac == constants.VALUE_AUTO:
913 nic_mac_ini = "nic%d_mac" % idx
914 nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
915
916 # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
917
918 # ip ping checks (we use the same ip that was resolved in ExpandNames)
919 if self.op.ip_check:
920 if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
921 raise errors.OpPrereqError("IP %s of instance %s already in use" %
922 (self.check_ip, self.op.instance_name),
923 errors.ECODE_NOTUNIQUE)
924
925 #### mac address generation
926 # By generating here the mac address both the allocator and the hooks get
927 # the real final mac address rather than the 'auto' or 'generate' value.
928 # There is a race condition between the generation and the instance object
929 # creation, which means that we know the mac is valid now, but we're not
930 # sure it will be when we actually add the instance. If things go bad
931 # adding the instance will abort because of a duplicate mac, and the
932 # creation job will fail.
933 for nic in self.nics:
934 if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
935 nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())
936
937 #### allocator run
938
939 if self.op.iallocator is not None:
940 self._RunAllocator()
941
942 # Release all unneeded node locks
943 keep_locks = filter(None, [self.op.pnode, self.op.snode, self.op.src_node])
944 ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
945 ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
946 ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
947
948 assert (self.owned_locks(locking.LEVEL_NODE) ==
949 self.owned_locks(locking.LEVEL_NODE_RES)), \
950 "Node locks differ from node resource locks"
951
952 #### node related checks
953
954 # check primary node
955 self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
956 assert self.pnode is not None, \
957 "Cannot retrieve locked node %s" % self.op.pnode
958 if pnode.offline:
959 raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
960 pnode.name, errors.ECODE_STATE)
961 if pnode.drained:
962 raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
963 pnode.name, errors.ECODE_STATE)
964 if not pnode.vm_capable:
965 raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
966 " '%s'" % pnode.name, errors.ECODE_STATE)
967
968 self.secondaries = []
969
970 # Fill in any IPs from IP pools. This must happen here, because we need to
971 # know the nic's primary node, as specified by the iallocator
972 for idx, nic in enumerate(self.nics):
973 net_uuid = nic.network
974 if net_uuid is not None:
975 nobj = self.cfg.GetNetwork(net_uuid)
976 netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.name)
977 if netparams is None:
978 raise errors.OpPrereqError("No netparams found for network"
979 " %s. Propably not connected to"
980 " node's %s nodegroup" %
981 (nobj.name, self.pnode.name),
982 errors.ECODE_INVAL)
983 self.LogInfo("NIC/%d inherits netparams %s" %
984 (idx, netparams.values()))
985 nic.nicparams = dict(netparams)
986 if nic.ip is not None:
987 if nic.ip.lower() == constants.NIC_IP_POOL:
988 try:
989 nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
990 except errors.ReservationError:
991 raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
992 " from the address pool" % idx,
993 errors.ECODE_STATE)
994 self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
995 else:
996 try:
997 self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId())
998 except errors.ReservationError:
999 raise errors.OpPrereqError("IP address %s already in use"
1000 " or does not belong to network %s" %
1001 (nic.ip, nobj.name),
1002 errors.ECODE_NOTUNIQUE)
1003
1004 # net is None, ip None or given
1005 elif self.op.conflicts_check:
1006 _CheckForConflictingIp(self, nic.ip, self.pnode.name)
1007
1008 # mirror node verification
1009 if self.op.disk_template in constants.DTS_INT_MIRROR:
1010 if self.op.snode == pnode.name:
1011 raise errors.OpPrereqError("The secondary node cannot be the"
1012 " primary node", errors.ECODE_INVAL)
1013 CheckNodeOnline(self, self.op.snode)
1014 CheckNodeNotDrained(self, self.op.snode)
1015 CheckNodeVmCapable(self, self.op.snode)
1016 self.secondaries.append(self.op.snode)
1017
1018 snode = self.cfg.GetNodeInfo(self.op.snode)
1019 if pnode.group != snode.group:
1020 self.LogWarning("The primary and secondary nodes are in two"
1021 " different node groups; the disk parameters"
1022 " from the first disk's node group will be"
1023 " used")
1024
1025 if not self.op.disk_template in constants.DTS_EXCL_STORAGE:
1026 nodes = [pnode]
1027 if self.op.disk_template in constants.DTS_INT_MIRROR:
1028 nodes.append(snode)
1029 has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
1030 if compat.any(map(has_es, nodes)):
1031 raise errors.OpPrereqError("Disk template %s not supported with"
1032 " exclusive storage" % self.op.disk_template,
1033 errors.ECODE_STATE)
1034
1035 nodenames = [pnode.name] + self.secondaries
1036
1037 if not self.adopt_disks:
1038 if self.op.disk_template == constants.DT_RBD:
1039 # _CheckRADOSFreeSpace() is just a placeholder.
1040 # Any function that checks prerequisites can be placed here.
1041 # Check if there is enough space on the RADOS cluster.
1042 CheckRADOSFreeSpace()
1043 elif self.op.disk_template == constants.DT_EXT:
1044 # FIXME: Function that checks prereqs if needed
1045 pass
1046 else:
1047 # Check lv size requirements, if not adopting
1048 req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
1049 CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)
1050
1051 elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
1052 all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
1053 disk[constants.IDISK_ADOPT])
1054 for disk in self.disks])
1055 if len(all_lvs) != len(self.disks):
1056 raise errors.OpPrereqError("Duplicate volume names given for adoption",
1057 errors.ECODE_INVAL)
1058 for lv_name in all_lvs:
1059 try:
1060 # FIXME: lv_name here is "vg/lv"; need to ensure that other calls
1061 # to ReserveLV use the same syntax
1062 self.cfg.ReserveLV(lv_name, self.proc.GetECId())
1063 except errors.ReservationError:
1064 raise errors.OpPrereqError("LV named %s used by another instance" %
1065 lv_name, errors.ECODE_NOTUNIQUE)
1066
1067 vg_names = self.rpc.call_vg_list([pnode.name])[pnode.name]
1068 vg_names.Raise("Cannot get VG information from node %s" % pnode.name)
1069
1070 node_lvs = self.rpc.call_lv_list([pnode.name],
1071 vg_names.payload.keys())[pnode.name]
1072 node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
1073 node_lvs = node_lvs.payload
1074
1075 delta = all_lvs.difference(node_lvs.keys())
1076 if delta:
1077 raise errors.OpPrereqError("Missing logical volume(s): %s" %
1078 utils.CommaJoin(delta),
1079 errors.ECODE_INVAL)
1080 online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
1081 if online_lvs:
1082 raise errors.OpPrereqError("Online logical volumes found, cannot"
1083 " adopt: %s" % utils.CommaJoin(online_lvs),
1084 errors.ECODE_STATE)
1085 # update the size of disk based on what is found
1086 for dsk in self.disks:
1087 dsk[constants.IDISK_SIZE] = \
1088 int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
1089 dsk[constants.IDISK_ADOPT])][0]))
1090
1091 elif self.op.disk_template == constants.DT_BLOCK:
1092 # Normalize and de-duplicate device paths
1093 all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
1094 for disk in self.disks])
1095 if len(all_disks) != len(self.disks):
1096 raise errors.OpPrereqError("Duplicate disk names given for adoption",
1097 errors.ECODE_INVAL)
1098 baddisks = [d for d in all_disks
1099 if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
1100 if baddisks:
1101 raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
1102 " cannot be adopted" %
1103 (utils.CommaJoin(baddisks),
1104 constants.ADOPTABLE_BLOCKDEV_ROOT),
1105 errors.ECODE_INVAL)
1106
1107 node_disks = self.rpc.call_bdev_sizes([pnode.name],
1108 list(all_disks))[pnode.name]
1109 node_disks.Raise("Cannot get block device information from node %s" %
1110 pnode.name)
1111 node_disks = node_disks.payload
1112 delta = all_disks.difference(node_disks.keys())
1113 if delta:
1114 raise errors.OpPrereqError("Missing block device(s): %s" %
1115 utils.CommaJoin(delta),
1116 errors.ECODE_INVAL)
1117 for dsk in self.disks:
1118 dsk[constants.IDISK_SIZE] = \
1119 int(float(node_disks[dsk[constants.IDISK_ADOPT]]))
1120
1121 # Verify instance specs
1122 spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
1123 ispec = {
1124 constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
1125 constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
1126 constants.ISPEC_DISK_COUNT: len(self.disks),
1127 constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
1128 for disk in self.disks],
1129 constants.ISPEC_NIC_COUNT: len(self.nics),
1130 constants.ISPEC_SPINDLE_USE: spindle_use,
1131 }
1132
1133 group_info = self.cfg.GetNodeGroup(pnode.group)
1134 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1135 res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
1136 self.op.disk_template)
1137 if not self.op.ignore_ipolicy and res:
1138 msg = ("Instance allocation to group %s (%s) violates policy: %s" %
1139 (pnode.group, group_info.name, utils.CommaJoin(res)))
1140 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
1141
1142 CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
1143
1144 CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
1145 # check OS parameters (remotely)
1146 CheckOSParams(self, True, nodenames, self.op.os_type, self.os_full)
1147
1148 CheckNicsBridgesExist(self, self.nics, self.pnode.name)
1149
1150 #TODO: _CheckExtParams (remotely)
1151 # Check parameters for extstorage
1152
1153 # memory check on primary node
1154 #TODO(dynmem): use MINMEM for checking
1155 if self.op.start:
1156 CheckNodeFreeMemory(self, self.pnode.name,
1157 "creating instance %s" % self.op.instance_name,
1158 self.be_full[constants.BE_MAXMEM],
1159 self.op.hypervisor)
1160
1161 self.dry_run_result = list(nodenames)
1162
1163 def Exec(self, feedback_fn):
1164 """Create and add the instance to the cluster.
1165
1166 """
1167 instance = self.op.instance_name
1168 pnode_name = self.pnode.name
1169
1170 assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
1171 self.owned_locks(locking.LEVEL_NODE)), \
1172 "Node locks differ from node resource locks"
1173 assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)
1174
1175 ht_kind = self.op.hypervisor
1176 if ht_kind in constants.HTS_REQ_PORT:
1177 network_port = self.cfg.AllocatePort()
1178 else:
1179 network_port = None
1180
1181 # This is ugly, but we have a chicken-and-egg problem here:
1182 # We can only take the group disk parameters, as the instance
1183 # has no disks yet (we are generating them right here).
1184 node = self.cfg.GetNodeInfo(pnode_name)
1185 nodegroup = self.cfg.GetNodeGroup(node.group)
1186 disks = GenerateDiskTemplate(self,
1187 self.op.disk_template,
1188 instance, pnode_name,
1189 self.secondaries,
1190 self.disks,
1191 self.instance_file_storage_dir,
1192 self.op.file_driver,
1193 0,
1194 feedback_fn,
1195 self.cfg.GetGroupDiskParams(nodegroup))
1196
1197 iobj = objects.Instance(name=instance, os=self.op.os_type,
1198 primary_node=pnode_name,
1199 nics=self.nics, disks=disks,
1200 disk_template=self.op.disk_template,
1201 disks_active=False,
1202 admin_state=constants.ADMINST_DOWN,
1203 network_port=network_port,
1204 beparams=self.op.beparams,
1205 hvparams=self.op.hvparams,
1206 hypervisor=self.op.hypervisor,
1207 osparams=self.op.osparams,
1208 )
1209
1210 if self.op.tags:
1211 for tag in self.op.tags:
1212 iobj.AddTag(tag)
1213
1214 if self.adopt_disks:
1215 if self.op.disk_template == constants.DT_PLAIN:
1216 # rename LVs to the newly-generated names; we need to construct
1217 # 'fake' LV disks with the old data, plus the new unique_id
1218 tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
1219 rename_to = []
1220 for t_dsk, a_dsk in zip(tmp_disks, self.disks):
1221 rename_to.append(t_dsk.logical_id)
1222 t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
1223 self.cfg.SetDiskID(t_dsk, pnode_name)
1224 result = self.rpc.call_blockdev_rename(pnode_name,
1225 zip(tmp_disks, rename_to))
1226 result.Raise("Failed to rename adopted LVs")
1227 else:
1228 feedback_fn("* creating instance disks...")
1229 try:
1230 CreateDisks(self, iobj)
1231 except errors.OpExecError:
1232 self.LogWarning("Device creation failed")
1233 self.cfg.ReleaseDRBDMinors(instance)
1234 raise
1235
1236 feedback_fn("adding instance %s to cluster config" % instance)
1237
1238 self.cfg.AddInstance(iobj, self.proc.GetECId())
1239
1240 # Declare that we don't want to remove the instance lock anymore, as we've
1241 # added the instance to the config
1242 del self.remove_locks[locking.LEVEL_INSTANCE]
1243
1244 if self.op.mode == constants.INSTANCE_IMPORT:
1245 # Release unused nodes
1246 ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node])
1247 else:
1248 # Release all nodes
1249 ReleaseLocks(self, locking.LEVEL_NODE)
1250
1251 disk_abort = False
1252 if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
1253 feedback_fn("* wiping instance disks...")
1254 try:
1255 WipeDisks(self, iobj)
1256 except errors.OpExecError, err:
1257 logging.exception("Wiping disks failed")
1258 self.LogWarning("Wiping instance disks failed (%s)", err)
1259 disk_abort = True
1260
1261 if disk_abort:
1262 # Something is already wrong with the disks, don't do anything else
1263 pass
1264 elif self.op.wait_for_sync:
1265 disk_abort = not WaitForSync(self, iobj)
1266 elif iobj.disk_template in constants.DTS_INT_MIRROR:
1267 # make sure the disks are not degraded (still sync-ing is ok)
1268 feedback_fn("* checking mirrors status")
1269 disk_abort = not WaitForSync(self, iobj, oneshot=True)
1270 else:
1271 disk_abort = False
1272
1273 if disk_abort:
1274 RemoveDisks(self, iobj)
1275 self.cfg.RemoveInstance(iobj.name)
1276 # Make sure the instance lock gets removed
1277 self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
1278 raise errors.OpExecError("There are some degraded disks for"
1279 " this instance")
1280
1281 # instance disks are now active
1282 iobj.disks_active = True
1283
1284 # Release all node resource locks
1285 ReleaseLocks(self, locking.LEVEL_NODE_RES)
1286
1287 if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
1288 # we need to set the disk IDs to the primary node, since the
1289 # preceding code might or might not have done it, depending on
1290 # disk template and other options
1291 for disk in iobj.disks:
1292 self.cfg.SetDiskID(disk, pnode_name)
1293 if self.op.mode == constants.INSTANCE_CREATE:
1294 if not self.op.no_install:
1295 pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
1296 not self.op.wait_for_sync)
1297 if pause_sync:
1298 feedback_fn("* pausing disk sync to install instance OS")
1299 result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
1300 (iobj.disks,
1301 iobj), True)
1302 for idx, success in enumerate(result.payload):
1303 if not success:
1304 logging.warn("pause-sync of instance %s for disk %d failed",
1305 instance, idx)
1306
1307 feedback_fn("* running the instance OS create scripts...")
1308 # FIXME: pass debug option from opcode to backend
1309 os_add_result = \
1310 self.rpc.call_instance_os_add(pnode_name, (iobj, None), False,
1311 self.op.debug_level)
1312 if pause_sync:
1313 feedback_fn("* resuming disk sync")
1314 result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
1315 (iobj.disks,
1316 iobj), False)
1317 for idx, success in enumerate(result.payload):
1318 if not success:
1319 logging.warn("resume-sync of instance %s for disk %d failed",
1320 instance, idx)
1321
1322 os_add_result.Raise("Could not add os for instance %s"
1323 " on node %s" % (instance, pnode_name))
1324
1325 else:
1326 if self.op.mode == constants.INSTANCE_IMPORT:
1327 feedback_fn("* running the instance OS import scripts...")
1328
1329 transfers = []
1330
1331 for idx, image in enumerate(self.src_images):
1332 if not image:
1333 continue
1334
1335 # FIXME: pass debug option from opcode to backend
1336 dt = masterd.instance.DiskTransfer("disk/%s" % idx,
1337 constants.IEIO_FILE, (image, ),
1338 constants.IEIO_SCRIPT,
1339 (iobj.disks[idx], idx),
1340 None)
1341 transfers.append(dt)
1342
1343 import_result = \
1344 masterd.instance.TransferInstanceData(self, feedback_fn,
1345 self.op.src_node, pnode_name,
1346 self.pnode.secondary_ip,
1347 iobj, transfers)
1348 if not compat.all(import_result):
1349 self.LogWarning("Some disks for instance %s on node %s were not"
1350 " imported successfully" % (instance, pnode_name))
1351
1352 rename_from = self._old_instance_name
1353
1354 elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
1355 feedback_fn("* preparing remote import...")
1356 # The source cluster will stop the instance before attempting to make
1357 # a connection. In some cases stopping an instance can take a long
1358 # time, hence the shutdown timeout is added to the connection
1359 # timeout.
1360 connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
1361 self.op.source_shutdown_timeout)
1362 timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
1363
1364 assert iobj.primary_node == self.pnode.name
1365 disk_results = \
1366 masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
1367 self.source_x509_ca,
1368 self._cds, timeouts)
1369 if not compat.all(disk_results):
1370 # TODO: Should the instance still be started, even if some disks
1371 # failed to import (valid for local imports, too)?
1372 self.LogWarning("Some disks for instance %s on node %s were not"
1373 " imported successfully" % (instance, pnode_name))
1374
1375 rename_from = self.source_instance_name
1376
1377 else:
1378 # also checked in the prereq part
1379 raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
1380 % self.op.mode)
1381
1382 # Run rename script on newly imported instance
1383 assert iobj.name == instance
1384 feedback_fn("Running rename script for %s" % instance)
1385 result = self.rpc.call_instance_run_rename(pnode_name, iobj,
1386 rename_from,
1387 self.op.debug_level)
1388 if result.fail_msg:
1389 self.LogWarning("Failed to run rename script for %s on node"
1390 " %s: %s" % (instance, pnode_name, result.fail_msg))
1391
1392 assert not self.owned_locks(locking.LEVEL_NODE_RES)
1393
1394 if self.op.start:
1395 iobj.admin_state = constants.ADMINST_UP
1396 self.cfg.Update(iobj, feedback_fn)
1397 logging.info("Starting instance %s on node %s", instance, pnode_name)
1398 feedback_fn("* starting instance...")
1399 result = self.rpc.call_instance_start(pnode_name, (iobj, None, None),
1400 False, self.op.reason)
1401 result.Raise("Could not start instance")
1402
1403 return list(iobj.all_nodes)
1404
1405
1406 class LUInstanceRename(LogicalUnit):
1407 """Rename an instance.
1408
1409 """
1410 HPATH = "instance-rename"
1411 HTYPE = constants.HTYPE_INSTANCE
1412
1413 def CheckArguments(self):
1414 """Check arguments.
1415
1416 """
1417 if self.op.ip_check and not self.op.name_check:
1418 # TODO: make the ip check more flexible and not depend on the name check
1419 raise errors.OpPrereqError("IP address check requires a name check",
1420 errors.ECODE_INVAL)
1421
1422 def BuildHooksEnv(self):
1423 """Build hooks env.
1424
1425 This runs on master, primary and secondary nodes of the instance.
1426
1427 """
1428 env = BuildInstanceHookEnvByObject(self, self.instance)
1429 env["INSTANCE_NEW_NAME"] = self.op.new_name
1430 return env
1431
1432 def BuildHooksNodes(self):
1433 """Build hooks nodes.
1434
1435 """
1436 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
1437 return (nl, nl)
1438
1439 def CheckPrereq(self):
1440 """Check prerequisites.
1441
1442 This checks that the instance is in the cluster and is not running.
1443
1444 """
1445 self.op.instance_name = ExpandInstanceName(self.cfg,
1446 self.op.instance_name)
1447 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
1448 assert instance is not None
1449 CheckNodeOnline(self, instance.primary_node)
1450 CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
1451 msg="cannot rename")
1452 self.instance = instance
1453
1454 new_name = self.op.new_name
1455 if self.op.name_check:
1456 hostname = _CheckHostnameSane(self, new_name)
1457 new_name = self.op.new_name = hostname.name
1458 if (self.op.ip_check and
1459 netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
1460 raise errors.OpPrereqError("IP %s of instance %s already in use" %
1461 (hostname.ip, new_name),
1462 errors.ECODE_NOTUNIQUE)
1463
1464 instance_list = self.cfg.GetInstanceList()
1465 if new_name in instance_list and new_name != instance.name:
1466 raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
1467 new_name, errors.ECODE_EXISTS)
1468
1469 def Exec(self, feedback_fn):
1470 """Rename the instance.
1471
1472 """
1473 inst = self.instance
1474 old_name = inst.name
1475
1476 rename_file_storage = False
1477 if (inst.disk_template in constants.DTS_FILEBASED and
1478 self.op.new_name != inst.name):
1479 old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
1480 rename_file_storage = True
1481
1482 self.cfg.RenameInstance(inst.name, self.op.new_name)
1483 # Change the instance lock. This is definitely safe while we hold the BGL.
1484 # Otherwise the new lock would have to be added in acquired mode.
1485 assert self.REQ_BGL
1486 assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
1487 self.glm.remove(locking.LEVEL_INSTANCE, old_name)
1488 self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
1489
1490 # re-read the instance from the configuration after rename
1491 inst = self.cfg.GetInstanceInfo(self.op.new_name)
1492
1493 if rename_file_storage:
1494 new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
1495 result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
1496 old_file_storage_dir,
1497 new_file_storage_dir)
1498 result.Raise("Could not rename on node %s directory '%s' to '%s'"
1499 " (but the instance has been renamed in Ganeti)" %
1500 (inst.primary_node, old_file_storage_dir,
1501 new_file_storage_dir))
1502
1503 StartInstanceDisks(self, inst, None)
1504 # update info on disks
1505 info = GetInstanceInfoText(inst)
1506 for (idx, disk) in enumerate(inst.disks):
1507 for node in inst.all_nodes:
1508 self.cfg.SetDiskID(disk, node)
1509 result = self.rpc.call_blockdev_setinfo(node, disk, info)
1510 if result.fail_msg:
1511 self.LogWarning("Error setting info on node %s for disk %s: %s",
1512 node, idx, result.fail_msg)
1513 try:
1514 result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
1515 old_name, self.op.debug_level)
1516 msg = result.fail_msg
1517 if msg:
1518 msg = ("Could not run OS rename script for instance %s on node %s"
1519 " (but the instance has been renamed in Ganeti): %s" %
1520 (inst.name, inst.primary_node, msg))
1521 self.LogWarning(msg)
1522 finally:
1523 ShutdownInstanceDisks(self, inst)
1524
1525 return inst.name
1526
1527
1528 class LUInstanceRemove(LogicalUnit):
1529 """Remove an instance.
1530
1531 """
1532 HPATH = "instance-remove"
1533 HTYPE = constants.HTYPE_INSTANCE
1534 REQ_BGL = False
1535
1536 def ExpandNames(self):
1537 self._ExpandAndLockInstance()
1538 self.needed_locks[locking.LEVEL_NODE] = []
1539 self.needed_locks[locking.LEVEL_NODE_RES] = []
1540 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
1541
1542 def DeclareLocks(self, level):
1543 if level == locking.LEVEL_NODE:
1544 self._LockInstancesNodes()
1545 elif level == locking.LEVEL_NODE_RES:
1546 # Copy node locks
1547 self.needed_locks[locking.LEVEL_NODE_RES] = \
1548 CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1549
1550 def BuildHooksEnv(self):
1551 """Build hooks env.
1552
1553 This runs on master, primary and secondary nodes of the instance.
1554
1555 """
1556 env = BuildInstanceHookEnvByObject(self, self.instance)
1557 env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
1558 return env
1559
1560 def BuildHooksNodes(self):
1561 """Build hooks nodes.
1562
1563 """
1564 nl = [self.cfg.GetMasterNode()]
1565 nl_post = list(self.instance.all_nodes) + nl
1566 return (nl, nl_post)
1567
1568 def CheckPrereq(self):
1569 """Check prerequisites.
1570
1571 This checks that the instance is in the cluster.
1572
1573 """
1574 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
1575 assert self.instance is not None, \
1576 "Cannot retrieve locked instance %s" % self.op.instance_name
1577
1578 def Exec(self, feedback_fn):
1579 """Remove the instance.
1580
1581 """
1582 instance = self.instance
1583 logging.info("Shutting down instance %s on node %s",
1584 instance.name, instance.primary_node)
1585
1586 result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
1587 self.op.shutdown_timeout,
1588 self.op.reason)
1589 msg = result.fail_msg
1590 if msg:
1591 if self.op.ignore_failures:
1592 feedback_fn("Warning: can't shutdown instance: %s" % msg)
1593 else:
1594 raise errors.OpExecError("Could not shutdown instance %s on"
1595 " node %s: %s" %
1596 (instance.name, instance.primary_node, msg))
1597
1598 assert (self.owned_locks(locking.LEVEL_NODE) ==
1599 self.owned_locks(locking.LEVEL_NODE_RES))
1600 assert not (set(instance.all_nodes) -
1601 self.owned_locks(locking.LEVEL_NODE)), \
1602 "Not owning correct locks"
1603
1604 RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)
1605
1606
1607 class LUInstanceMove(LogicalUnit):
1608 """Move an instance by data-copying.
1609
1610 """
1611 HPATH = "instance-move"
1612 HTYPE = constants.HTYPE_INSTANCE
1613 REQ_BGL = False
1614
1615 def ExpandNames(self):
1616 self._ExpandAndLockInstance()
1617 target_node = ExpandNodeName(self.cfg, self.op.target_node)
1618 self.op.target_node = target_node
1619 self.needed_locks[locking.LEVEL_NODE] = [target_node]
1620 self.needed_locks[locking.LEVEL_NODE_RES] = []
1621 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
1622
1623 def DeclareLocks(self, level):
1624 if level == locking.LEVEL_NODE:
1625 self._LockInstancesNodes(primary_only=True)
1626 elif level == locking.LEVEL_NODE_RES:
1627 # Copy node locks
1628 self.needed_locks[locking.LEVEL_NODE_RES] = \
1629 CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1630
1631 def BuildHooksEnv(self):
1632 """Build hooks env.
1633
1634 This runs on master, primary and secondary nodes of the instance.
1635
1636 """
1637 env = {
1638 "TARGET_NODE": self.op.target_node,
1639 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
1640 }
1641 env.update(BuildInstanceHookEnvByObject(self, self.instance))
1642 return env
1643
1644 def BuildHooksNodes(self):
1645 """Build hooks nodes.
1646
1647 """
1648 nl = [
1649 self.cfg.GetMasterNode(),
1650 self.instance.primary_node,
1651 self.op.target_node,
1652 ]
1653 return (nl, nl)
1654
1655 def CheckPrereq(self):
1656 """Check prerequisites.
1657
1658 This checks that the instance is in the cluster.
1659
1660 """
1661 self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
1662 assert self.instance is not None, \
1663 "Cannot retrieve locked instance %s" % self.op.instance_name
1664
1665 if instance.disk_template not in constants.DTS_COPYABLE:
1666 raise errors.OpPrereqError("Disk template %s not suitable for copying" %
1667 instance.disk_template, errors.ECODE_STATE)
1668
1669 node = self.cfg.GetNodeInfo(self.op.target_node)
1670 assert node is not None, \
1671 "Cannot retrieve locked node %s" % self.op.target_node
1672
1673 self.target_node = target_node = node.name
1674
1675 if target_node == instance.primary_node:
1676 raise errors.OpPrereqError("Instance %s is already on the node %s" %
1677 (instance.name, target_node),
1678 errors.ECODE_STATE)
1679
1680 bep = self.cfg.GetClusterInfo().FillBE(instance)
1681
1682 for idx, dsk in enumerate(instance.disks):
1683 if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
1684 raise errors.OpPrereqError("Instance disk %d has a complex layout,"
1685 " cannot copy" % idx, errors.ECODE_STATE)
1686
1687 CheckNodeOnline(self, target_node)
1688 CheckNodeNotDrained(self, target_node)
1689 CheckNodeVmCapable(self, target_node)
1690 cluster = self.cfg.GetClusterInfo()
1691 group_info = self.cfg.GetNodeGroup(node.group)
1692 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1693 CheckTargetNodeIPolicy(self, ipolicy, instance, node, self.cfg,
1694 ignore=self.op.ignore_ipolicy)
1695
1696 if instance.admin_state == constants.ADMINST_UP:
1697 # check memory requirements on the target node
1698 CheckNodeFreeMemory(self, target_node,
1699 "failing over instance %s" %
1700 instance.name, bep[constants.BE_MAXMEM],
1701 instance.hypervisor)
1702 else:
1703 self.LogInfo("Not checking memory on the target node as"
1704 " instance will not be started")
1705
1706 # check bridge existence
1707 CheckInstanceBridgesExist(self, instance, node=target_node)
1708
1709 def Exec(self, feedback_fn):
1710 """Move an instance.
1711
1712 The move is done by shutting it down on its present node, copying
1713 the data over (slow) and starting it on the new node.
1714
1715 """
1716 instance = self.instance
1717
1718 source_node = instance.primary_node
1719 target_node = self.target_node
1720
1721 self.LogInfo("Shutting down instance %s on source node %s",
1722 instance.name, source_node)
1723
1724 assert (self.owned_locks(locking.LEVEL_NODE) ==
1725 self.owned_locks(locking.LEVEL_NODE_RES))
1726
1727 result = self.rpc.call_instance_shutdown(source_node, instance,
1728 self.op.shutdown_timeout,
1729 self.op.reason)
1730 msg = result.fail_msg
1731 if msg:
1732 if self.op.ignore_consistency:
1733 self.LogWarning("Could not shutdown instance %s on node %s."
1734 " Proceeding anyway. Please make sure node"
1735 " %s is down. Error details: %s",
1736 instance.name, source_node, source_node, msg)
1737 else:
1738 raise errors.OpExecError("Could not shutdown instance %s on"
1739 " node %s: %s" %
1740 (instance.name, source_node, msg))
1741
1742 # create the target disks
1743 try:
1744 CreateDisks(self, instance, target_node=target_node)
1745 except errors.OpExecError:
1746 self.LogWarning("Device creation failed")
1747 self.cfg.ReleaseDRBDMinors(instance.name)
1748 raise
1749
1750 cluster_name = self.cfg.GetClusterInfo().cluster_name
1751
1752 errs = []
1753 # activate, get path, copy the data over
1754 for idx, disk in enumerate(instance.disks):
1755 self.LogInfo("Copying data for disk %d", idx)
1756 result = self.rpc.call_blockdev_assemble(target_node, (disk, instance),
1757 instance.name, True, idx)
1758 if result.fail_msg:
1759 self.LogWarning("Can't assemble newly created disk %d: %s",
1760 idx, result.fail_msg)
1761 errs.append(result.fail_msg)
1762 break
1763 dev_path = result.payload
1764 result = self.rpc.call_blockdev_export(source_node, (disk, instance),
1765 target_node, dev_path,
1766 cluster_name)
1767 if result.fail_msg:
1768 self.LogWarning("Can't copy data over for disk %d: %s",
1769 idx, result.fail_msg)
1770 errs.append(result.fail_msg)
1771 break
1772
1773 if errs:
1774 self.LogWarning("Some disks failed to copy, aborting")
1775 try:
1776 RemoveDisks(self, instance, target_node=target_node)
1777 finally:
1778 self.cfg.ReleaseDRBDMinors(instance.name)
1779 raise errors.OpExecError("Errors during disk copy: %s" %
1780 (",".join(errs),))
1781
1782 instance.primary_node = target_node
1783 self.cfg.Update(instance, feedback_fn)
1784
1785 self.LogInfo("Removing the disks on the original node")
1786 RemoveDisks(self, instance, target_node=source_node)
1787
1788 # Only start the instance if it's marked as up
1789 if instance.admin_state == constants.ADMINST_UP:
1790 self.LogInfo("Starting instance %s on node %s",
1791 instance.name, target_node)
1792
1793 disks_ok, _ = AssembleInstanceDisks(self, instance,
1794 ignore_secondaries=True)
1795 if not disks_ok:
1796 ShutdownInstanceDisks(self, instance)
1797 raise errors.OpExecError("Can't activate the instance's disks")
1798
1799 result = self.rpc.call_instance_start(target_node,
1800 (instance, None, None), False,
1801 self.op.reason)
1802 msg = result.fail_msg
1803 if msg:
1804 ShutdownInstanceDisks(self, instance)
1805 raise errors.OpExecError("Could not start instance %s on node %s: %s" %
1806 (instance.name, target_node, msg))
1807
1808
1809 class LUInstanceMultiAlloc(NoHooksLU):
1810 """Allocates multiple instances at the same time.
1811
1812 """
1813 REQ_BGL = False
1814
1815 def CheckArguments(self):
1816 """Check arguments.
1817
1818 """
1819 nodes = []
1820 for inst in self.op.instances:
1821 if inst.iallocator is not None:
1822 raise errors.OpPrereqError("iallocator is not allowed to be set on"
1823 " instance objects", errors.ECODE_INVAL)
1824 nodes.append(bool(inst.pnode))
1825 if inst.disk_template in constants.DTS_INT_MIRROR:
1826 nodes.append(bool(inst.snode))
1827
1828 has_nodes = compat.any(nodes)
1829 if compat.all(nodes) ^ has_nodes:
1830 raise errors.OpPrereqError("There are instance objects providing"
1831 " pnode/snode while others do not",
1832 errors.ECODE_INVAL)
1833
1834 if self.op.iallocator is None:
1835 default_iallocator = self.cfg.GetDefaultIAllocator()
1836 if default_iallocator and has_nodes:
1837 self.op.iallocator = default_iallocator
1838 else:
1839 raise errors.OpPrereqError("No iallocator or nodes on the instances"
1840 " given and no cluster-wide default"
1841 " iallocator found; please specify either"
1842 " an iallocator or nodes on the instances"
1843 " or set a cluster-wide default iallocator",
1844 errors.ECODE_INVAL)
1845
1846 _CheckOpportunisticLocking(self.op)
1847
1848 dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
1849 if dups:
1850 raise errors.OpPrereqError("There are duplicate instance names: %s" %
1851 utils.CommaJoin(dups), errors.ECODE_INVAL)
1852
1853 def ExpandNames(self):
1854 """Calculate the locks.
1855
1856 """
1857 self.share_locks = ShareAll()
1858 self.needed_locks = {
1859 # iallocator will select nodes and even if no iallocator is used,
1860 # collisions with LUInstanceCreate should be avoided
1861 locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
1862 }
1863
1864 if self.op.iallocator:
1865 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
1866 self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
1867
1868 if self.op.opportunistic_locking:
1869 self.opportunistic_locks[locking.LEVEL_NODE] = True
1870 self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
1871 else:
1872 nodeslist = []
1873 for inst in self.op.instances:
1874 inst.pnode = ExpandNodeName(self.cfg, inst.pnode)
1875 nodeslist.append(inst.pnode)
1876 if inst.snode is not None:
1877 inst.snode = ExpandNodeName(self.cfg, inst.snode)
1878 nodeslist.append(inst.snode)
1879
1880 self.needed_locks[locking.LEVEL_NODE] = nodeslist
1881 # Lock resources of instance's primary and secondary nodes (copy to
1882 # prevent accidental modification)
1883 self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)
1884
1885 def CheckPrereq(self):
1886 """Check prerequisite.
1887
1888 """
1889 cluster = self.cfg.GetClusterInfo()
1890 default_vg = self.cfg.GetVGName()
1891 ec_id = self.proc.GetECId()
1892
1893 if self.op.opportunistic_locking:
1894 # Only consider nodes for which a lock is held
1895 node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
1896 else:
1897 node_whitelist = None
1898
1899 insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
1900 _ComputeNics(op, cluster, None,
1901 self.cfg, ec_id),
1902 _ComputeFullBeParams(op, cluster),
1903 node_whitelist)
1904 for op in self.op.instances]
1905
1906 req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
1907 ial = iallocator.IAllocator(self.cfg, self.rpc, req)
1908
1909 ial.Run(self.op.iallocator)
1910
1911 if not ial.success:
1912 raise errors.OpPrereqError("Can't compute nodes using"
1913 " iallocator '%s': %s" %
1914 (self.op.iallocator, ial.info),
1915 errors.ECODE_NORES)
1916
1917 self.ia_result = ial.result
1918
1919 if self.op.dry_run:
1920 self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
1921 constants.JOB_IDS_KEY: [],
1922 })
1923
1924 def _ConstructPartialResult(self):
1925 """Contructs the partial result.
1926
1927 """
1928 (allocatable, failed) = self.ia_result
1929 return {
1930 opcodes.OpInstanceMultiAlloc.ALLOCATABLE_KEY:
1931 map(compat.fst, allocatable),
1932 opcodes.OpInstanceMultiAlloc.FAILED_KEY: failed,
1933 }
1934
1935 def Exec(self, feedback_fn):
1936 """Executes the opcode.
1937
1938 """
1939 op2inst = dict((op.instance_name, op) for op in self.op.instances)
1940 (allocatable, failed) = self.ia_result
1941
1942 jobs = []
1943 for (name, nodes) in allocatable:
1944 op = op2inst.pop(name)
1945
1946 if len(nodes) > 1:
1947 (op.pnode, op.snode) = nodes
1948 else:
1949 (op.pnode,) = nodes
1950
1951 jobs.append([op])
1952
1953 missing = set(op2inst.keys()) - set(failed)
1954 assert not missing, \
1955 "Iallocator did return incomplete result: %s" % utils.CommaJoin(missing)
1956
1957 return ResultWithJobs(jobs, **self._ConstructPartialResult())
1958
1959
1960 class _InstNicModPrivate:
1961 """Data structure for network interface modifications.
1962
1963 Used by L{LUInstanceSetParams}.
1964
1965 """
1966 def __init__(self):
1967 self.params = None
1968 self.filled = None
1969
1970
1971 def _PrepareContainerMods(mods, private_fn):
1972 """Prepares a list of container modifications by adding a private data field.
1973
1974 @type mods: list of tuples; (operation, index, parameters)
1975 @param mods: List of modifications
1976 @type private_fn: callable or None
1977 @param private_fn: Callable for constructing a private data field for a
1978 modification
1979 @rtype: list
1980
1981 """
1982 if private_fn is None:
1983 fn = lambda: None
1984 else:
1985 fn = private_fn
1986
1987 return [(op, idx, params, fn()) for (op, idx, params) in mods]
1988
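# Illustrative sketch, not part of the original module: with
# mods = [(constants.DDM_ADD, -1, {constants.IDISK_SIZE: 1024})] and
# private_fn=None, _PrepareContainerMods returns
# [(constants.DDM_ADD, -1, {constants.IDISK_SIZE: 1024}, None)]; passing
# private_fn=_InstNicModPrivate would instead attach a fresh private
# object to each modification for later use by the apply callbacks.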
1989
1990 def _CheckNodesPhysicalCPUs(lu, nodenames, requested, hypervisor_name):
1991 """Checks if nodes have enough physical CPUs
1992
1993 This function checks if all given nodes have the needed number of
1994 physical CPUs. In case any node has less CPUs or we cannot get the
1995 information from the node, this function raises an OpPrereqError
1996 exception.
1997
1998 @type lu: C{LogicalUnit}
1999 @param lu: a logical unit from which we get configuration data
2000 @type nodenames: C{list}
2001 @param nodenames: the list of node names to check
2002 @type requested: C{int}
2003 @param requested: the minimum acceptable number of physical CPUs
2004 @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
2005 or we cannot check the node
2006
2007 """
2008 nodeinfo = lu.rpc.call_node_info(nodenames, None, [hypervisor_name], None)
2009 for node in nodenames:
2010 info = nodeinfo[node]
2011 info.Raise("Cannot get current information from node %s" % node,
2012 prereq=True, ecode=errors.ECODE_ENVIRON)
2013 (_, _, (hv_info, )) = info.payload
2014 num_cpus = hv_info.get("cpu_total", None)
2015 if not isinstance(num_cpus, int):
2016 raise errors.OpPrereqError("Can't compute the number of physical CPUs"
2017 " on node %s, result was '%s'" %
2018 (node, num_cpus), errors.ECODE_ENVIRON)
2019 if requested > num_cpus:
2020 raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
2021 "required" % (node, num_cpus, requested),
2022 errors.ECODE_NORES)
2023
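# Illustrative sketch, not part of the original module: a logical unit
# needing at least 4 physical CPUs on all instance nodes could call
# _CheckNodesPhysicalCPUs(lu, instance.all_nodes, 4, instance.hypervisor),
# which raises OpPrereqError if any node reports fewer than 4 CPUs in its
# "cpu_total" field or cannot be queried at all.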
2024
2025 def GetItemFromContainer(identifier, kind, container):
2026 """Return the item refered by the identifier.
2027
2028 @type identifier: string
2029 @param identifier: Item index or name or UUID
2030 @type kind: string
2031 @param kind: One-word item description
2032 @type container: list
2033 @param container: Container to get the item from
2034
2035 """
2036 # Index
2037 try:
2038 idx = int(identifier)
2039 if idx == -1:
2040 # Append
2041 absidx = len(container) - 1
2042 elif idx < 0:
2043 raise IndexError("Not accepting negative indices other than -1")
2044 elif idx > len(container):
2045 raise IndexError("Got %s index %s, but there are only %s" %
2046 (kind, idx, len(container)))
2047 else:
2048 absidx = idx
2049 return (absidx, container[idx])
2050 except ValueError:
2051 pass
2052
2053 for idx, item in enumerate(container):
2054 if item.uuid == identifier or item.name == identifier:
2055 return (idx, item)
2056
2057 raise errors.OpPrereqError("Cannot find %s with identifier %s" %
2058 (kind, identifier), errors.ECODE_NOENT)
2059
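# Illustrative sketch, not part of the original module: for a container
# nics = [nic_a, nic_b], GetItemFromContainer("0", "nic", nics) returns
# (0, nic_a) and GetItemFromContainer("-1", "nic", nics) returns
# (1, nic_b); a non-numeric identifier such as nic_b.uuid or nic_b.name
# falls through to the name/UUID lookup and also yields (1, nic_b).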
2060
2061 def _ApplyContainerMods(kind, container, chgdesc, mods,
2062 create_fn, modify_fn, remove_fn):
2063 """Applies descriptions in C{mods} to C{container}.
2064
2065 @type kind: string
2066 @param kind: One-word item description
2067 @type container: list
2068 @param container: Container to modify
2069 @type chgdesc: None or list
2070 @param chgdesc: List of applied changes
2071 @type mods: list
2072 @param mods: Modifications as returned by L{_PrepareContainerMods}
2073 @type create_fn: callable
2074 @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
2075 receives absolute item index, parameters and private data object as added
2076 by L{_PrepareContainerMods}, returns tuple containing new item and changes
2077 as list
2078 @type modify_fn: callable
2079 @param modify_fn: Callback for modifying an existing item
2080 (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
2081 and private data object as added by L{_PrepareContainerMods}, returns
2082 changes as list
2083 @type remove_fn: callable
2084 @param remove_fn: Callback on removing item; receives absolute item index,
2085 item and private data object as added by L{_PrepareContainerMods}
2086
2087 """
2088 for (op, identifier, params, private) in mods:
2089 changes = None
2090
2091 if op == constants.DDM_ADD:
2092 # Calculate where item will be added
2093 # When adding an item, identifier can only be an index
2094 try:
2095 idx = int(identifier)
2096 except ValueError:
2097 raise errors.OpPrereqError("Only possitive integer or -1 is accepted as"
2098 " identifier for %s" % constants.DDM_ADD,
2099 errors.ECODE_INVAL)
2100 if idx == -1:
2101 addidx = len(container)
2102 else:
2103 if idx < 0:
2104 raise IndexError("Not accepting negative indices other than -1")
2105 elif idx > len(container):
2106 raise IndexError("Got %s index %s, but there are only %s" %
2107 (kind, idx, len(container)))
2108 addidx = idx
2109
2110 if create_fn is None:
2111 item = params
2112 else:
2113 (item, changes) = create_fn(addidx, params, private)
2114
2115 if idx == -1:
2116 container.append(item)
2117 else:
2118 assert idx >= 0
2119 assert idx <= len(container)
2120 # list.insert does so before the specified index
2121 container.insert(idx, item)
2122 else:
2123 # Retrieve existing item
2124 (absidx, item) = GetItemFromContainer(identifier, kind, container)
2125
2126 if op == constants.DDM_REMOVE:
2127 assert not params
2128
2129 if remove_fn is not None:
2130 remove_fn(absidx, item, private)
2131
2132 changes = [("%s/%s" % (kind, absidx), "remove")]
2133
2134 assert container[absidx] == item
2135 del container[absidx]
2136 elif op == constants.DDM_MODIFY:
2137 if modify_fn is not None:
2138 changes = modify_fn(absidx, item, params, private)
2139 else:
2140 raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2141
2142 assert _TApplyContModsCbChanges(changes)
2143
2144 if not (chgdesc is None or changes is None):
2145 chgdesc.extend(changes)
2146
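# Illustrative sketch, not part of the original module: removing the last
# NIC of an instance could look roughly like
#   mods = _PrepareContainerMods([(constants.DDM_REMOVE, -1, {})], None)
#   chgdesc = []
#   _ApplyContainerMods("NIC", instance.nics, chgdesc, mods,
#                       None, None, None)
# after which the last element is gone from instance.nics and chgdesc
# holds a single ("NIC/<index>", "remove") entry.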
2147
2148 def _UpdateIvNames(base_index, disks):
2149 """Updates the C{iv_name} attribute of disks.
2150
2151 @type disks: list of L{objects.Disk}
2152
2153 """
2154 for (idx, disk) in enumerate(disks):
2155 disk.iv_name = "disk/%s" % (base_index + idx, )
2156
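# Illustrative sketch, not part of the original module: calling
# _UpdateIvNames(2, disks) relabels the given disks as "disk/2", "disk/3"
# and so on; _ConvertDrbdToPlain below uses base index 0 to renumber all
# of the instance's disks after the conversion.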
2157
2158 class LUInstanceSetParams(LogicalUnit):
2159 """Modifies an instances's parameters.
2160
2161 """
2162 HPATH = "instance-modify"
2163 HTYPE = constants.HTYPE_INSTANCE
2164 REQ_BGL = False
2165
2166 @staticmethod
2167 def _UpgradeDiskNicMods(kind, mods, verify_fn):
2168 assert ht.TList(mods)
2169 assert not mods or len(mods[0]) in (2, 3)
2170
2171 if mods and len(mods[0]) == 2:
2172 result = []
2173
2174 addremove = 0
2175 for op, params in mods:
2176 if op in (constants.DDM_ADD, constants.DDM_REMOVE):
2177 result.append((op, -1, params))
2178 addremove += 1
2179
2180 if addremove > 1:
2181 raise errors.OpPrereqError("Only one %s add or remove operation is"
2182 " supported at a time" % kind,
2183 errors.ECODE_INVAL)
2184 else:
2185 result.append((constants.DDM_MODIFY, op, params))
2186
2187 assert verify_fn(result)
2188 else:
2189 result = mods
2190
2191 return result
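
# Illustrative sketch, not part of the original module: the legacy
# two-element syntax [(constants.DDM_ADD, {constants.IDISK_SIZE: 1024})]
# is upgraded above to [(constants.DDM_ADD, -1, {constants.IDISK_SIZE: 1024})],
# while an (index, params) pair such as (0, {constants.IDISK_MODE: "ro"})
# becomes (constants.DDM_MODIFY, 0, {constants.IDISK_MODE: "ro"}).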
2192
2193 @staticmethod
2194 def _CheckMods(kind, mods, key_types, item_fn):
2195 """Ensures requested disk/NIC modifications are valid.
2196
2197 """
2198 for (op, _, params) in mods:
2199 assert ht.TDict(params)
2200
2201 # If 'key_types' is an empty dict, we assume we have an
2202 # 'ext' template and thus do not ForceDictType
2203 if key_types:
2204 utils.ForceDictType(params, key_types)
2205
2206 if op == constants.DDM_REMOVE:
2207 if params:
2208 raise errors.OpPrereqError("No settings should be passed when"
2209 " removing a %s" % kind,
2210 errors.ECODE_INVAL)
2211 elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
2212 item_fn(op, params)
2213 else:
2214 raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2215
2216 @staticmethod
2217 def _VerifyDiskModification(op, params):
2218 """Verifies a disk modification.
2219
2220 """
2221 if op == constants.DDM_ADD:
2222 mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
2223 if mode not in constants.DISK_ACCESS_SET:
2224 raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
2225 errors.ECODE_INVAL)
2226
2227 size = params.get(constants.IDISK_SIZE, None)
2228 if size is None:
2229 raise errors.OpPrereqError("Required disk parameter '%s' missing" %
2230 constants.IDISK_SIZE, errors.ECODE_INVAL)
2231
2232 try:
2233 size = int(size)
2234 except (TypeError, ValueError), err:
2235 raise errors.OpPrereqError("Invalid disk size parameter: %s" % err,
2236 errors.ECODE_INVAL)
2237
2238 params[constants.IDISK_SIZE] = size
2239 name = params.get(constants.IDISK_NAME, None)
2240 if name is not None and name.lower() == constants.VALUE_NONE:
2241 params[constants.IDISK_NAME] = None
2242
2243 elif op == constants.DDM_MODIFY:
2244 if constants.IDISK_SIZE in params:
2245 raise errors.OpPrereqError("Disk size change not possible, use"
2246 " grow-disk", errors.ECODE_INVAL)
2247 if len(params) > 2:
2248 raise errors.OpPrereqError("Disk modification doesn't support"
2249 " additional arbitrary parameters",
2250 errors.ECODE_INVAL)
2251 name = params.get(constants.IDISK_NAME, None)
2252 if name is not None and name.lower() == constants.VALUE_NONE:
2253 params[constants.IDISK_NAME] = None
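
# Illustrative sketch, not part of the original module: for
# constants.DDM_ADD a minimal parameter dict such as
# {constants.IDISK_SIZE: 1024} passes _VerifyDiskModification (the access
# mode defaults to constants.DISK_RDWR), while a constants.DDM_MODIFY
# dict containing constants.IDISK_SIZE is rejected with a pointer to
# gnt-instance grow-disk.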
2254
2255 @staticmethod
2256 def _VerifyNicModification(op, params):
2257 """Verifies a network interface modification.
2258
2259 """
2260 if op in (constants.DDM_ADD, constants.DDM_MODIFY):
2261 ip = params.get(constants.INIC_IP, None)
2262 name = params.get(constants.INIC_NAME, None)
2263 req_net = params.get(constants.INIC_NETWORK, None)
2264 link = params.get(constants.NIC_LINK, None)
2265 mode = params.get(constants.NIC_MODE, None)
2266 if name is not None and name.lower() == constants.VALUE_NONE:
2267 params[constants.INIC_NAME] = None
2268 if req_net is not None:
2269 if req_net.lower() == constants.VALUE_NONE:
2270 params[constants.INIC_NETWORK] = None
2271 req_net = None
2272 elif link is not None or mode is not None:
2273 raise errors.OpPrereqError("If network is given"
2274 " mode or link should not",
2275 errors.ECODE_INVAL)
2276
2277 if op == constants.DDM_ADD:
2278 macaddr = params.get(constants.INIC_MAC, None)
2279 if macaddr is None:
2280 params[constants.INIC_MAC] = constants.VALUE_AUTO
2281
2282 if ip is not None:
2283 if ip.lower() == constants.VALUE_NONE:
2284 params[constants.INIC_IP] = None
2285 else:
2286 if ip.lower() == constants.NIC_IP_POOL:
2287 if op == constants.DDM_ADD and req_net is None:
2288 raise errors.OpPrereqError("If ip=pool, parameter network"
2289 " cannot be none",
2290 errors.ECODE_INVAL)
2291 else:
2292 if not netutils.IPAddress.IsValid(ip):
2293 raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
2294 errors.ECODE_INVAL)
2295
2296 if constants.INIC_MAC in params:
2297 macaddr = params[constants.INIC_MAC]
2298 if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2299 macaddr = utils.NormalizeAndValidateMac(macaddr)
2300
2301 if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
2302 raise errors.OpPrereqError("'auto' is not a valid MAC address when"
2303 " modifying an existing NIC",
2304 errors.ECODE_INVAL)
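
# Illustrative sketch, not part of the original module: adding a NIC with
# an IP from a pool could use parameters like
#   {constants.INIC_IP: constants.NIC_IP_POOL,
#    constants.INIC_NETWORK: "mynet"}
# in which case _VerifyNicModification defaults the MAC address to
# constants.VALUE_AUTO; passing ip=pool without a network is rejected.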
2305
2306 def CheckArguments(self):
2307 if not (self.op.nics or self.op.disks or self.op.disk_template or
2308 self.op.hvparams or self.op.beparams or self.op.os_name or
2309 self.op.offline is not None or self.op.runtime_mem or
2310 self.op.pnode):
2311 raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
2312
2313 if self.op.hvparams:
2314 CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
2315 "hypervisor", "instance", "cluster")
2316
2317 self.op.disks = self._UpgradeDiskNicMods(
2318 "disk", self.op.disks, opcodes.OpInstanceSetParams.TestDiskModifications)
2319 self.op.nics = self._UpgradeDiskNicMods(
2320 "NIC", self.op.nics, opcodes.OpInstanceSetParams.TestNicModifications)
2321
2322 if self.op.disks and self.op.disk_template is not None:
2323 raise errors.OpPrereqError("Disk template conversion and other disk"
2324 " changes not supported at the same time",
2325 errors.ECODE_INVAL)
2326
2327 if (self.op.disk_template and
2328 self.op.disk_template in constants.DTS_INT_MIRROR and
2329 self.op.remote_node is None):
2330 raise errors.OpPrereqError("Changing the disk template to a mirrored"
2331 " one requires specifying a secondary node",
2332 errors.ECODE_INVAL)
2333
2334 # Check NIC modifications
2335 self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
2336 self._VerifyNicModification)
2337
2338 if self.op.pnode:
2339 self.op.pnode = ExpandNodeName(self.cfg, self.op.pnode)
2340
2341 def ExpandNames(self):
2342 self._ExpandAndLockInstance()
2343 self.needed_locks[locking.LEVEL_NODEGROUP] = []
2344 # Can't even acquire node locks in shared mode as upcoming changes in
2345 # Ganeti 2.6 will start to modify the node object on disk conversion
2346 self.needed_locks[locking.LEVEL_NODE] = []
2347 self.needed_locks[locking.LEVEL_NODE_RES] = []
2348 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2349 # Lock the node group to look up the ipolicy
2350 self.share_locks[locking.LEVEL_NODEGROUP] = 1
2351
2352 def DeclareLocks(self, level):
2353 if level == locking.LEVEL_NODEGROUP:
2354 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
2355 # Acquire locks for the instance's nodegroups optimistically. Needs
2356 # to be verified in CheckPrereq
2357 self.needed_locks[locking.LEVEL_NODEGROUP] = \
2358 self.cfg.GetInstanceNodeGroups(self.op.instance_name)
2359 elif level == locking.LEVEL_NODE:
2360 self._LockInstancesNodes()
2361 if self.op.disk_template and self.op.remote_node:
2362 self.op.remote_node = ExpandNodeName(self.cfg, self.op.remote_node)
2363 self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node)
2364 elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
2365 # Copy node locks
2366 self.needed_locks[locking.LEVEL_NODE_RES] = \
2367 CopyLockList(self.needed_locks[locking.LEVEL_NODE])
2368
2369 def BuildHooksEnv(self):
2370 """Build hooks env.
2371
2372 This runs on the master, primary and secondaries.
2373
2374 """
2375 args = {}
2376 if constants.BE_MINMEM in self.be_new:
2377 args["minmem"] = self.be_new[constants.BE_MINMEM]
2378 if constants.BE_MAXMEM in self.be_new:
2379 args["maxmem"] = self.be_new[constants.BE_MAXMEM]
2380 if constants.BE_VCPUS in self.be_new:
2381 args["vcpus"] = self.be_new[constants.BE_VCPUS]
2382 # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
2383 # information at all.
2384
2385 if self._new_nics is not None:
2386 nics = []
2387
2388 for nic in self._new_nics:
2389 n = copy.deepcopy(nic)
2390 nicparams = self.cluster.SimpleFillNIC(n.nicparams)
2391 n.nicparams = nicparams
2392 nics.append(NICToTuple(self, n))
2393
2394 args["nics"] = nics
2395
2396 env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
2397 if self.op.disk_template:
2398 env["NEW_DISK_TEMPLATE"] = self.op.disk_template
2399 if self.op.runtime_mem:
2400 env["RUNTIME_MEMORY"] = self.op.runtime_mem
2401
2402 return env
2403
2404 def BuildHooksNodes(self):
2405 """Build hooks nodes.
2406
2407 """
2408 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2409 return (nl, nl)
2410
2411 def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
2412 old_params, cluster, pnode):
2413
2414 update_params_dict = dict([(key, params[key])
2415 for key in constants.NICS_PARAMETERS
2416 if key in params])
2417
2418 req_link = update_params_dict.get(constants.NIC_LINK, None)
2419 req_mode = update_params_dict.get(constants.NIC_MODE, None)
2420
2421 new_net_uuid = None
2422 new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
2423 if new_net_uuid_or_name:
2424 new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
2425 new_net_obj = self.cfg.GetNetwork(new_net_uuid)
2426
2427 if old_net_uuid:
2428 old_net_obj = self.cfg.GetNetwork(old_net_uuid)
2429
2430 if new_net_uuid:
2431 netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode)
2432 if not netparams:
2433 raise errors.OpPrereqError("No netparams found for the network"
2434 " %s, probably not connected" %
2435 new_net_obj.name, errors.ECODE_INVAL)
2436 new_params = dict(netparams)
2437 else:
2438 new_params = GetUpdatedParams(old_params, update_params_dict)
2439
2440 utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)
2441
2442 new_filled_params = cluster.SimpleFillNIC(new_params)
2443 objects.NIC.CheckParameterSyntax(new_filled_params)
2444
2445 new_mode = new_filled_params[constants.NIC_MODE]
2446 if new_mode == constants.NIC_MODE_BRIDGED:
2447 bridge = new_filled_params[constants.NIC_LINK]
2448 msg = self.rpc.call_bridges_exist(pnode, [bridge]).fail_msg
2449 if msg:
2450 msg = "Error checking bridges on node '%s': %s" % (pnode, msg)
2451 if self.op.force:
2452 self.warn.append(msg)
2453 else:
2454 raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
2455
2456 elif new_mode == constants.NIC_MODE_ROUTED:
2457 ip = params.get(constants.INIC_IP, old_ip)
2458 if ip is None:
2459 raise errors.OpPrereqError("Cannot set the NIC IP address to None"
2460 " on a routed NIC", errors.ECODE_INVAL)
2461
2462 elif new_mode == constants.NIC_MODE_OVS:
2463 # TODO: check OVS link
2464 self.LogInfo("OVS links are currently not checked for correctness")
2465
2466 if constants.INIC_MAC in params:
2467 mac = params[constants.INIC_MAC]
2468 if mac is None:
2469 raise errors.OpPrereqError("Cannot unset the NIC MAC address",
2470 errors.ECODE_INVAL)
2471 elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2472 # otherwise generate the MAC address
2473 params[constants.INIC_MAC] = \
2474 self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2475 else:
2476 # or validate/reserve the current one
2477 try:
2478 self.cfg.ReserveMAC(mac, self.proc.GetECId())
2479 except errors.ReservationError:
2480 raise errors.OpPrereqError("MAC address '%s' already in use"
2481 " in cluster" % mac,
2482 errors.ECODE_NOTUNIQUE)
2483 elif new_net_uuid != old_net_uuid:
2484
2485 def get_net_prefix(net_uuid):
2486 mac_prefix = None
2487 if net_uuid:
2488 nobj = self.cfg.GetNetwork(net_uuid)
2489 mac_prefix = nobj.mac_prefix
2490
2491 return mac_prefix
2492
2493 new_prefix = get_net_prefix(new_net_uuid)
2494 old_prefix = get_net_prefix(old_net_uuid)
2495 if old_prefix != new_prefix:
2496 params[constants.INIC_MAC] = \
2497 self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2498
2499 # if there is a change in (ip, network) tuple
2500 new_ip = params.get(constants.INIC_IP, old_ip)
2501 if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
2502 if new_ip:
2503 # if IP is pool then require a network and generate one IP
2504 if new_ip.lower() == constants.NIC_IP_POOL:
2505 if new_net_uuid:
2506 try:
2507 new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
2508 except errors.ReservationError:
2509 raise errors.OpPrereqError("Unable to get a free IP"
2510 " from the address pool",
2511 errors.ECODE_STATE)
2512 self.LogInfo("Chose IP %s from network %s",
2513 new_ip,
2514 new_net_obj.name)
2515 params[constants.INIC_IP] = new_ip
2516 else:
2517 raise errors.OpPrereqError("ip=pool, but no network found",
2518 errors.ECODE_INVAL)
2519 # Reserve the new IP in the new network, if any
2520 elif new_net_uuid:
2521 try:
2522 self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId())
2523 self.LogInfo("Reserving IP %s in network %s",
2524 new_ip, new_net_obj.name)
2525 except errors.ReservationError:
2526 raise errors.OpPrereqError("IP %s not available in network %s" %
2527 (new_ip, new_net_obj.name),
2528 errors.ECODE_NOTUNIQUE)
2529 # new network is None so check if new IP is a conflicting IP
2530 elif self.op.conflicts_check:
2531 _CheckForConflictingIp(self, new_ip, pnode)
2532
2533 # release old IP if old network is not None
2534 if old_ip and old_net_uuid:
2535 try:
2536 self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
2537 except errors.AddressPoolError:
2538 logging.warning("Release IP %s not contained in network %s",
2539 old_ip, old_net_obj.name)
2540
2541 # there are no changes in (ip, network) tuple and old network is not None
2542 elif (old_net_uuid is not None and
2543 (req_link is not None or req_mode is not None)):
2544 raise errors.OpPrereqError("Not allowed to change link or mode of"
2545 " a NIC that is connected to a network",
2546 errors.ECODE_INVAL)
2547
2548 private.params = new_params
2549 private.filled = new_filled_params
2550
2551 def _PreCheckDiskTemplate(self, pnode_info):
2552 """CheckPrereq checks related to a new disk template."""
2553 # Arguments are passed to avoid configuration lookups
2554 instance = self.instance
2555 pnode = instance.primary_node
2556 cluster = self.cluster
2557 if instance.disk_template == self.op.disk_template:
2558 raise errors.OpPrereqError("Instance already has disk template %s" %
2559 instance.disk_template, errors.ECODE_INVAL)
2560
2561 if (instance.disk_template,
2562 self.op.disk_template) not in self._DISK_CONVERSIONS:
2563 raise errors.OpPrereqError("Unsupported disk template conversion from"
2564 " %s to %s" % (instance.disk_template,
2565 self.op.disk_template),
2566 errors.ECODE_INVAL)
2567 CheckInstanceState(self, instance, INSTANCE_DOWN,
2568 msg="cannot change disk template")
2569 if self.op.disk_template in constants.DTS_INT_MIRROR:
2570 if self.op.remote_node == pnode:
2571 raise errors.OpPrereqError("Given new secondary node %s is the same"
2572 " as the primary node of the instance" %
2573 self.op.remote_node, errors.ECODE_STATE)
2574 CheckNodeOnline(self, self.op.remote_node)
2575 CheckNodeNotDrained(self, self.op.remote_node)
2576 # FIXME: here we assume that the old instance type is DT_PLAIN
2577 assert instance.disk_template == constants.DT_PLAIN
2578 disks = [{constants.IDISK_SIZE: d.size,
2579 constants.IDISK_VG: d.logical_id[0]}
2580 for d in instance.disks]
2581 required = ComputeDiskSizePerVG(self.op.disk_template, disks)
2582 CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)
2583
2584 snode_info = self.cfg.GetNodeInfo(self.op.remote_node)
2585 snode_group = self.cfg.GetNodeGroup(snode_info.group)
2586 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
2587 snode_group)
2588 CheckTargetNodeIPolicy(self, ipolicy, instance, snode_info, self.cfg,
2589 ignore=self.op.ignore_ipolicy)
2590 if pnode_info.group != snode_info.group:
2591 self.LogWarning("The primary and secondary nodes are in two"
2592 " different node groups; the disk parameters"
2593 " from the first disk's node group will be"
2594 " used")
2595
2596 if self.op.disk_template not in constants.DTS_EXCL_STORAGE:
2597 # Make sure none of the nodes require exclusive storage
2598 nodes = [pnode_info]
2599 if self.op.disk_template in constants.DTS_INT_MIRROR:
2600 assert snode_info
2601 nodes.append(snode_info)
2602 has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
2603 if compat.any(map(has_es, nodes)):
2604 errmsg = ("Cannot convert disk template from %s to %s when exclusive"
2605 " storage is enabled" % (instance.disk_template,
2606 self.op.disk_template))
2607 raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)
2608
2609 def CheckPrereq(self):
2610 """Check prerequisites.
2611
2612 This checks the instance's current state and verifies the requested modifications.
2613
2614 """
2615 assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
2616 instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2617
2618 cluster = self.cluster = self.cfg.GetClusterInfo()
2619 assert self.instance is not None, \
2620 "Cannot retrieve locked instance %s" % self.op.instance_name
2621
2622 pnode = instance.primary_node
2623
2624 self.warn = []
2625
2626 if (self.op.pnode is not None and self.op.pnode != pnode and
2627 not self.op.force):
2628 # verify that the instance is not up
2629 instance_info = self.rpc.call_instance_info(pnode, instance.name,
2630 instance.hypervisor)
2631 if instance_info.fail_msg:
2632 self.warn.append("Can't get instance runtime information: %s" %
2633 instance_info.fail_msg)
2634 elif instance_info.payload:
2635 raise errors.OpPrereqError("Instance is still running on %s" % pnode,
2636 errors.ECODE_STATE)
2637
2638 assert pnode in self.owned_locks(locking.LEVEL_NODE)
2639 nodelist = list(instance.all_nodes)
2640 pnode_info = self.cfg.GetNodeInfo(pnode)
2641 self.diskparams = self.cfg.GetInstanceDiskParams(instance)
2642
2643 #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
2644 assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
2645 group_info = self.cfg.GetNodeGroup(pnode_info.group)
2646
2647 # dictionary with instance information after the modification
2648 ispec = {}
2649
2650 # Check disk modifications. This is done here and not in CheckArguments
2651 # (as with NICs), because we need to know the instance's disk template
2652 if instance.disk_template == constants.DT_EXT:
2653 self._CheckMods("disk", self.op.disks, {},
2654 self._VerifyDiskModification)
2655 else:
2656 self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
2657 self._VerifyDiskModification)
2658
2659 # Prepare disk/NIC modifications
2660 self.diskmod = _PrepareContainerMods(self.op.disks, None)
2661 self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)
2662
2663 # Check the validity of the `provider' parameter
2664 if instance.disk_template == constants.DT_EXT:
2665 for mod in self.diskmod:
2666 ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
2667 if mod[0] == constants.DDM_ADD:
2668 if ext_provider is None:
2669 raise errors.OpPrereqError("Instance template is '%s' and parameter"
2670 " '%s' missing, during disk add" %
2671 (constants.DT_EXT,
2672 constants.IDISK_PROVIDER),
2673 errors.ECODE_NOENT)
2674 elif mod[0] == constants.DDM_MODIFY:
2675 if ext_provider:
2676 raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
2677 " modification" %
2678 constants.IDISK_PROVIDER,
2679 errors.ECODE_INVAL)
2680 else:
2681 for mod in self.diskmod:
2682 ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
2683 if ext_provider is not None:
2684 raise errors.OpPrereqError("Parameter '%s' is only valid for"
2685 " instances of type '%s'" %
2686 (constants.IDISK_PROVIDER,
2687 constants.DT_EXT),
2688 errors.ECODE_INVAL)
2689
2690 # OS change
2691 if self.op.os_name and not self.op.force:
2692 CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
2693 self.op.force_variant)
2694 instance_os = self.op.os_name
2695 else:
2696 instance_os = instance.os
2697
2698 assert not (self.op.disk_template and self.op.disks), \
2699 "Can't modify disk template and apply disk changes at the same time"
2700
2701 if self.op.disk_template:
2702 self._PreCheckDiskTemplate(pnode_info)
2703
2704 # hvparams processing
2705 if self.op.hvparams:
2706 hv_type = instance.hypervisor
2707 i_hvdict = GetUpdatedParams(instance.hvparams, self.op.hvparams)
2708 utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
2709 hv_new = cluster.SimpleFillHV(hv_type, instance.os, i_hvdict)
2710
2711 # local check
2712 hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
2713 CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
2714 self.hv_proposed = self.hv_new = hv_new # the new actual values
2715 self.hv_inst = i_hvdict # the new dict (without defaults)
2716 else:
2717 self.hv_proposed = cluster.SimpleFillHV(instance.hypervisor, instance.os,
2718 instance.hvparams)
2719 self.hv_new = self.hv_inst = {}
2720
2721 # beparams processing
2722 if self.op.beparams:
2723 i_bedict = GetUpdatedParams(instance.beparams, self.op.beparams,
2724 use_none=True)
2725 objects.UpgradeBeParams(i_bedict)
2726 utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
2727 be_new = cluster.SimpleFillBE(i_bedict)
2728 self.be_proposed = self.be_new = be_new # the new actual values
2729 self.be_inst = i_bedict # the new dict (without defaults)
2730 else:
2731 self.be_new = self.be_inst = {}
2732 self.be_proposed = cluster.SimpleFillBE(instance.beparams)
2733 be_old = cluster.FillBE(instance)
2734
2735 # CPU param validation -- checking every time a parameter is
2736 # changed to cover all cases where either CPU mask or vcpus have
2737 # changed
2738 if (constants.BE_VCPUS in self.be_proposed and
2739 constants.HV_CPU_MASK in self.hv_proposed):
2740 cpu_list = \
2741 utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
2742 # Verify mask is consistent with number of vCPUs. Can skip this
2743 # test if only 1 entry in the CPU mask, which means same mask
2744 # is applied to all vCPUs.
2745 if (len(cpu_list) > 1 and
2746 len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
2747 raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
2748 " CPU mask [%s]" %
2749 (self.be_proposed[constants.BE_VCPUS],
2750 self.hv_proposed[constants.HV_CPU_MASK]),
2751 errors.ECODE_INVAL)
2752
2753 # Only perform this test if a new CPU mask is given
2754 if constants.HV_CPU_MASK in self.hv_new:
2755 # Calculate the largest CPU number requested
2756 max_requested_cpu = max(map(max, cpu_list))
2757 # Check that all of the instance's nodes have enough physical CPUs to
2758 # satisfy the requested CPU mask
2759 _CheckNodesPhysicalCPUs(self, instance.all_nodes,
2760 max_requested_cpu + 1, instance.hypervisor)
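# Illustrative example, not part of the original module: assuming
# utils.ParseMultiCpuMask parses "1,3:2-4" into [[1, 3], [2, 3, 4]], such
# a mask requires BE_VCPUS == 2, and since the highest CPU number used is
# 4, every node of the instance must report at least 5 physical CPUs.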
2761
2762 # osparams processing
2763 if self.op.osparams:
2764 i_osdict = GetUpdatedParams(instance.osparams, self.op.osparams)
2765 CheckOSParams(self, True, nodelist, instance_os, i_osdict)
2766 self.os_inst = i_osdict # the new dict (without defaults)
2767 else:
2768 self.os_inst = {}
2769
2770 #TODO(dynmem): do the appropriate check involving MINMEM
2771 if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
2772 be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
2773 mem_check_list = [pnode]
2774 if be_new[constants.BE_AUTO_BALANCE]:
2775 # either we changed auto_balance to yes or it was from before
2776 mem_check_list.extend(instance.secondary_nodes)
2777 instance_info = self.rpc.call_instance_info(pnode, instance.name,
2778 instance.hypervisor)
2779 nodeinfo = self.rpc.call_node_info(mem_check_list, None,
2780 [instance.hypervisor], False)
2781 pninfo = nodeinfo[pnode]
2782 msg = pninfo.fail_msg
2783 if msg:
2784 # Assume the primary node is unreachable and go ahead
2785 self.warn.append("Can't get info from primary node %s: %s" %
2786 (pnode, msg))
2787 else:
2788 (_, _, (pnhvinfo, )) = pninfo.payload
2789 if not isinstance(pnhvinfo.get("memory_free", None), int):
2790 self.warn.append("Node data from primary node %s doesn't contain"
2791 " free memory information" % pnode)
2792 elif instance_info.fail_msg:
2793 self.warn.append("Can't get instance runtime information: %s" %
2794 instance_info.fail_msg)
2795 else:
2796 if instance_info.payload:
2797 current_mem = int(instance_info.payload["memory"])
2798 else:
2799 # Assume instance not running
2800 # (there is a slight race condition here, but it's not very
2801 # probable, and we have no other way to check)
2802 # TODO: Describe race condition
2803 current_mem = 0
2804 #TODO(dynmem): do the appropriate check involving MINMEM
2805 miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
2806 pnhvinfo["memory_free"])
2807 if miss_mem > 0:
2808 raise errors.OpPrereqError("This change will prevent the instance"
2809 " from starting, due to %d MB of memory"
2810 " missing on its primary node" %
2811 miss_mem, errors.ECODE_NORES)
2812
2813 if be_new[constants.BE_AUTO_BALANCE]:
2814 for node, nres in nodeinfo.items():
2815 if node not in instance.secondary_nodes:
2816 continue
2817 nres.Raise("Can't get info from secondary node %s" % node,
2818 prereq=True, ecode=errors.ECODE_STATE)
2819 (_, _, (nhvinfo, )) = nres.payload
2820 if not isinstance(nhvinfo.get("memory_free", None), int):
2821 raise errors.OpPrereqError("Secondary node %s didn't return free"
2822 " memory information" % node,
2823 errors.ECODE_STATE)
2824 #TODO(dynmem): do the appropriate check involving MINMEM
2825 elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
2826 raise errors.OpPrereqError("This change will prevent the instance"
2827 " from failover to its secondary node"
2828 " %s, due to not enough memory" % node,
2829 errors.ECODE_STATE)
2830
2831 if self.op.runtime_mem:
2832 remote_info = self.rpc.call_instance_info(instance.primary_node,
2833 instance.name,
2834 instance.hypervisor)
2835 remote_info.Raise("Error checking node %s" % instance.primary_node)
2836 if not remote_info.payload: # not running already
2837 raise errors.OpPrereqError("Instance %s is not running" %
2838 instance.name, errors.ECODE_STATE)
2839
2840 current_memory = remote_info.payload["memory"]
2841 if (not self.op.force and
2842 (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
2843 self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
2844 raise errors.OpPrereqError("Instance %s must have memory between %d"
2845 " and %d MB of memory unless --force is"
2846 " given" %
2847 (instance.name,
2848 self.be_proposed[constants.BE_MINMEM],
2849 self.be_proposed[constants.BE_MAXMEM]),
2850 errors.ECODE_INVAL)
2851
2852 delta = self.op.runtime_mem - current_memory
2853 if delta > 0:
2854 CheckNodeFreeMemory(self, instance.primary_node,
2855 "ballooning memory for instance %s" %
2856 instance.name, delta, instance.hypervisor)
2857
2858 if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
2859 raise errors.OpPrereqError("Disk operations not supported for"
2860 " diskless instances", errors.ECODE_INVAL)
2861
2862 def _PrepareNicCreate(_, params, private):
2863 self._PrepareNicModification(params, private, None, None,
2864 {}, cluster, pnode)
2865 return (None, None)
2866
2867 def _PrepareNicMod(_, nic, params, private):
2868 self._PrepareNicModification(params, private, nic.ip, nic.network,
2869 nic.nicparams, cluster, pnode)
2870 return None
2871
2872 def _PrepareNicRemove(_, params, __):
2873 ip = params.ip
2874 net = params.network
2875 if net is not None and ip is not None:
2876 self.cfg.ReleaseIp(net, ip, self.proc.GetECId())
2877
2878 # Verify NIC changes (operating on copy)
2879 nics = instance.nics[:]
2880 _ApplyContainerMods("NIC", nics, None, self.nicmod,
2881 _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
2882 if len(nics) > constants.MAX_NICS:
2883 raise errors.OpPrereqError("Instance has too many network interfaces"
2884 " (%d), cannot add more" % constants.MAX_NICS,
2885 errors.ECODE_STATE)
2886
2887 def _PrepareDiskMod(_, disk, params, __):
2888 disk.name = params.get(constants.IDISK_NAME, None)
2889
2890 # Verify disk changes (operating on a copy)
2891 disks = copy.deepcopy(instance.disks)
2892 _ApplyContainerMods("disk", disks, None, self.diskmod, None,
2893 _PrepareDiskMod, None)
2894 utils.ValidateDeviceNames("disk", disks)
2895 if len(disks) > constants.MAX_DISKS:
2896 raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
2897 " more" % constants.MAX_DISKS,
2898 errors.ECODE_STATE)
2899 disk_sizes = [disk.size for disk in instance.disks]
2900 disk_sizes.extend(params["size"] for (op, idx, params, private) in
2901 self.diskmod if op == constants.DDM_ADD)
2902 ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
2903 ispec[constants.ISPEC_DISK_SIZE] = disk_sizes
2904
2905 if self.op.offline is not None and self.op.offline:
2906 CheckInstanceState(self, instance, CAN_CHANGE_INSTANCE_OFFLINE,
2907 msg="can't change to offline")
2908
2909 # Pre-compute NIC changes (necessary to use result in hooks)
2910 self._nic_chgdesc = []
2911 if self.nicmod:
2912 # Operate on copies as this is still in prereq
2913 nics = [nic.Copy() for nic in instance.nics]
2914 _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
2915 self._CreateNewNic, self._ApplyNicMods, None)
2916 # Verify that NIC names are unique and valid
2917 utils.ValidateDeviceNames("NIC", nics)
2918 self._new_nics = nics
2919 ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
2920 else:
2921 self._new_nics = None
2922 ispec[constants.ISPEC_NIC_COUNT] = len(instance.nics)
2923
2924 if not self.op.ignore_ipolicy:
2925 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
2926 group_info)
2927
2928 # Fill ispec with backend parameters
2929 ispec[constants.ISPEC_SPINDLE_USE] = \
2930 self.be_new.get(constants.BE_SPINDLE_USE, None)
2931 ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
2932 None)
2933
2934 # Copy ispec to verify parameters with min/max values separately
2935 if self.op.disk_template:
2936 new_disk_template = self.op.disk_template
2937 else:
2938 new_disk_template = instance.disk_template
2939 ispec_max = ispec.copy()
2940 ispec_max[constants.ISPEC_MEM_SIZE] = \
2941 self.be_new.get(constants.BE_MAXMEM, None)
2942 res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
2943 new_disk_template)
2944 ispec_min = ispec.copy()
2945 ispec_min[constants.ISPEC_MEM_SIZE] = \
2946 self.be_new.get(constants.BE_MINMEM, None)
2947 res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
2948 new_disk_template)
2949
2950 if (res_max or res_min):
2951 # FIXME: Improve error message by including information about whether
2952 # the upper or lower limit of the parameter fails the ipolicy.
2953 msg = ("Instance allocation to group %s (%s) violates policy: %s" %
2954 (group_info, group_info.name,
2955 utils.CommaJoin(set(res_max + res_min))))
2956 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
2957
2958 def _ConvertPlainToDrbd(self, feedback_fn):
2959 """Converts an instance from plain to drbd.
2960
2961 """
2962 feedback_fn("Converting template to drbd")
2963 instance = self.instance
2964 pnode = instance.primary_node
2965 snode = self.op.remote_node
2966
2967 assert instance.disk_template == constants.DT_PLAIN
2968
2969 # create a fake disk info for _GenerateDiskTemplate
2970 disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
2971 constants.IDISK_VG: d.logical_id[0],
2972 constants.IDISK_NAME: d.name}
2973 for d in instance.disks]
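# Illustrative example, not part of the original module: for a single
# 1024 MiB plain disk living in volume group "xenvg", disk_info would be
# [{constants.IDISK_SIZE: 1024, constants.IDISK_MODE: "rw",
#   constants.IDISK_VG: "xenvg", constants.IDISK_NAME: None}]
# which is all GenerateDiskTemplate needs to build the matching DRBD8
# disk tree across the primary and the new secondary node.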
2974 new_disks = GenerateDiskTemplate(self, self.op.disk_template,
2975 instance.name, pnode, [snode],
2976 disk_info, None, None, 0, feedback_fn,
2977 self.diskparams)
2978 anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
2979 self.diskparams)
2980 p_excl_stor = IsExclusiveStorageEnabledNodeName(self.cfg, pnode)
2981 s_excl_stor = IsExclusiveStorageEnabledNodeName(self.cfg, snode)
2982 info = GetInstanceInfoText(instance)
2983 feedback_fn("Creating additional volumes...")
2984 # first, create the missing data and meta devices
2985 for disk in anno_disks:
2986 # unfortunately this is... not too nice
2987 CreateSingleBlockDev(self, pnode, instance, disk.children[1],
2988 info, True, p_excl_stor)
2989 for child in disk.children:
2990 CreateSingleBlockDev(self, snode, instance, child, info, True,
2991 s_excl_stor)
2992 # at this stage, all new LVs have been created, we can rename the
2993 # old ones
2994 feedback_fn("Renaming original volumes...")
2995 rename_list = [(o, n.children[0].logical_id)
2996 for (o, n) in zip(instance.disks, new_disks)]
2997 result = self.rpc.call_blockdev_rename(pnode, rename_list)
2998 result.Raise("Failed to rename original LVs")
2999
3000 feedback_fn("Initializing DRBD devices...")
3001 # all child devices are in place, we can now create the DRBD devices
3002 try:
3003 for disk in anno_disks:
3004 for (node, excl_stor) in [(pnode, p_excl_stor), (snode, s_excl_stor)]:
3005 f_create = node == pnode
3006 CreateSingleBlockDev(self, node, instance, disk, info, f_create,
3007 excl_stor)
3008 except errors.GenericError, e:
3009 feedback_fn("Initializing of DRBD devices failed;"
3010 " renaming back original volumes...")
3011 for disk in new_disks:
3012 self.cfg.SetDiskID(disk, pnode)
3013 rename_back_list = [(n.children[0], o.logical_id)
3014 for (n, o) in zip(new_disks, instance.disks)]
3015 result = self.rpc.call_blockdev_rename(pnode, rename_back_list)
3016 result.Raise("Failed to rename LVs back after error %s" % str(e))
3017 raise
3018
3019 # at this point, the instance has been modified
3020 instance.disk_template = constants.DT_DRBD8
3021 instance.disks = new_disks
3022 self.cfg.Update(instance, feedback_fn)
3023
3024 # Release node locks while waiting for sync
3025 ReleaseLocks(self, locking.LEVEL_NODE)
3026
3027 # disks are created, waiting for sync
3028 disk_abort = not WaitForSync(self, instance,
3029 oneshot=not self.op.wait_for_sync)
3030 if disk_abort:
3031 raise errors.OpExecError("There are some degraded disks for"
3032 " this instance, please cleanup manually")
3033
3034 # Node resource locks will be released by caller
3035
3036 def _ConvertDrbdToPlain(self, feedback_fn):
3037 """Converts an instance from drbd to plain.
3038
3039 """
3040 instance = self.instance
3041
3042 assert len(instance.secondary_nodes) == 1
3043 assert instance.disk_template == constants.DT_DRBD8
3044
3045 pnode = instance.primary_node
3046 snode = instance.secondary_nodes[0]
3047 feedback_fn("Converting template to plain")
3048
3049 old_disks = AnnotateDiskParams(instance, instance.disks, self.cfg)
3050 new_disks = [d.children[0] for d in instance.disks]
3051
3052 # copy over size, mode and name
3053 for parent, child in zip(old_disks, new_disks):
3054 child.size = parent.size
3055 child.mode = parent.mode
3056 child.name = parent.name
3057
3058 # this is a DRBD disk, return its port to the pool
3059 # NOTE: this must be done right before the call to cfg.Update!
3060 for disk in old_disks:
3061 tcp_port = disk.logical_id[2]
3062 self.cfg.AddTcpUdpPort(tcp_port)
3063
3064 # update instance structure
3065 instance.disks = new_disks
3066 instance.disk_template = constants.DT_PLAIN
3067 _UpdateIvNames(0, instance.disks)
3068 self.cfg.Update(instance, feedback_fn)
3069
3070 # Release locks in case removing disks takes a while
3071 ReleaseLocks(self, locking.LEVEL_NODE)
3072
3073 feedback_fn("Removing volumes on the secondary node...")
3074 for disk in old_disks:
3075 self.cfg.SetDiskID(disk, snode)
3076 msg = self.rpc.call_blockdev_remove(snode, disk).fail_msg
3077 if msg:
3078 self.LogWarning("Could not remove block device %s on node %s,"
3079 " continuing anyway: %s", disk.iv_name, snode, msg)
3080
3081 feedback_fn("Removing unneeded volumes on the primary node...")
3082 for idx, disk in enumerate(old_disks):
3083 meta = disk.children[1]
3084 self.cfg.SetDiskID(meta, pnode)
3085 msg = self.rpc.call_blockdev_remove(pnode, meta).fail_msg
3086 if msg:
3087 self.LogWarning("Could not remove metadata for disk %d on node %s,"
3088 " continuing anyway: %s", idx, pnode, msg)
3089
3090 def _CreateNewDisk(self, idx, params, _):
3091 """Creates a new disk.
3092
3093 """
3094 instance = self.instance
3095
3096 # add a new disk
3097 if instance.disk_template in constants.DTS_FILEBASED:
3098 (file_driver, file_path) = instance.disks[0].logical_id
3099 file_path = os.path.dirname(file_path)
3100 else:
3101 file_driver = file_path = None
3102
3103 disk = \
3104 GenerateDiskTemplate(self, instance.disk_template, instance.name,
3105 instance.primary_node, instance.secondary_nodes,
3106 [params], file_path, file_driver, idx,
3107 self.Log, self.diskparams)[0]
3108
3109 new_disks = CreateDisks(self, instance, disks=[disk])
3110
3111 if self.cluster.prealloc_wipe_disks:
3112 # Wipe new disk
3113 WipeOrCleanupDisks(self, instance,
3114 disks=[(idx, disk, 0)],
3115 cleanup=new_disks)
3116
3117 return (disk, [
3118 ("disk/%d" % idx, "add:size=%s,mode=%s" % (disk.size, disk.mode)),
3119 ])
3120
3121 @staticmethod
3122 def _ModifyDisk(idx, disk, params, _):
3123 """Modifies a disk.
3124
3125 """
3126 changes = []
3127 mode = params.get(constants.IDISK_MODE, None)
3128 if mode:
3129 disk.mode = mode
3130 changes.append(("disk.mode/%d" % idx, disk.mode))
3131
3132 name = params.get(constants.IDISK_NAME, None)
3133 # only change the name if one was actually given, so that an existing name
3134 # is not cleared by a modification that does not touch it
if name is not None:
disk.name = name
changes.append(("disk.name/%d" % idx, disk.name))
3135
3136 return changes
3137
3138 def _RemoveDisk(self, idx, root, _):
3139 """Removes a disk.
3140
3141 """
3142 (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
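# remove the device and its components from every node on which (a part of)
# it resides, e.g. from both peers of a DRBD disk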
3143 for node, disk in anno_disk.ComputeNodeTree(self.instance.primary_node):
3144 self.cfg.SetDiskID(disk, node)
3145 msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
3146 if msg:
3147 self.LogWarning("Could not remove disk/%d on node '%s': %s,"
3148 " continuing anyway", idx, node, msg)
3149
3150 # if this is a DRBD disk, return its port to the pool
3151 if root.dev_type in constants.LDS_DRBD:
3152 self.cfg.AddTcpUdpPort(root.logical_id[2])
3153
3154 def _CreateNewNic(self, idx, params, private):
3155 """Creates data structure for a new network interface.
3156
3157 """
3158 mac = params[constants.INIC_MAC]
3159 ip = params.get(constants.INIC_IP, None)
3160 net = params.get(constants.INIC_NETWORK, None)
3161 name = params.get(constants.INIC_NAME, None)
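# networks are specified by name in the opcode but stored by UUID in the
# configuration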
3162 net_uuid = self.cfg.LookupNetwork(net)
3163 # TODO: should an empty private.filled be handled here, i.e. can a NIC have no nicparams?
3164 nicparams = private.filled
3165 nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
3166 nicparams=nicparams)
3167 nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
3168
3169 return (nobj, [
3170 ("nic.%d" % idx,
3171 "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
3172 (mac, ip, private.filled[constants.NIC_MODE],
3173 private.filled[constants.NIC_LINK],
3174 net)),
3175 ])
3176
3177 def _ApplyNicMods(self, idx, nic, params, private):
3178 """Modifies a network interface.
3179
3180 """
3181 changes = []
3182
3183 for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
3184 if key in params:
3185 changes.append(("nic.%s/%d" % (key, idx), params[key]))
3186 setattr(nic, key, params[key])
3187
3188 new_net = params.get(constants.INIC_NETWORK, nic.network)
3189 new_net_uuid = self.cfg.LookupNetwork(new_net)
3190 if new_net_uuid != nic.network:
3191 changes.append(("nic.network/%d" % idx, new_net))
3192 nic.network = new_net_uuid
3193
3194 if private.filled:
3195 nic.nicparams = private.filled
3196
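# report the complete, filled parameter set rather than only the changed keys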
3197 for (key, val) in nic.nicparams.items():
3198 changes.append(("nic.%s/%d" % (key, idx), val))
3199
3200 return changes
3201
3202 def Exec(self, feedback_fn):
3203 """Modifies an instance.
3204
3205 Most parameters take effect only at the next restart of the instance;
runtime memory changes are applied to the running instance immediately.
3206
3207 """
3208 # Process here the warnings from CheckPrereq, as we don't have a
3209 # feedback_fn there.
3210 # TODO: Replace with self.LogWarning
3211 for warn in self.warn:
3212 feedback_fn("WARNING: %s" % warn)
3213
3214 assert ((self.op.disk_template is None) ^
3215 bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
3216 "Node resource locks must be owned exactly when the disk template is changed"
3217
3218 result = []
3219 instance = self.instance
3220
3221 # New primary node
3222 if self.op.pnode:
3223 instance.primary_node = self.op.pnode
3224
3225 # runtime memory
3226 if self.op.runtime_mem:
3227 rpcres = self.rpc.call_instance_balloon_memory(instance.primary_node,
3228 instance,
3229 self.op.runtime_mem)
3230 rpcres.Raise("Cannot modify instance runtime memory")
3231 result.append(("runtime_memory", self.op.runtime_mem))
3232
3233 # Apply disk changes
3234 _ApplyContainerMods("disk", instance.disks, result, self.diskmod,
3235 self._CreateNewDisk, self._ModifyDisk,
3236 self._RemoveDisk)
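# renumber the disks' iv_name attributes ("disk/N") after additions and
# removals so that the names stay contiguous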
3237 _UpdateIvNames(0, instance.disks)
3238
3239 if self.op.disk_template:
3240 if __debug__:
3241 check_nodes = set(instance.all_nodes)
3242 if self.op.remote_node:
3243 check_nodes.add(self.op.remote_node)
3244 for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
3245 owned = self.owned_locks(level)
3246 assert not (check_nodes - owned), \
3247 ("Not owning the correct locks, owning %r, expected at least %r" %
3248 (owned, check_nodes))
3249
3250 r_shut = ShutdownInstanceDisks(self, instance)
3251 if not r_shut:
3252 raise errors.OpExecError("Cannot shut down instance disks, unable to"
3253 " proceed with disk template conversion")
3254 mode = (instance.disk_template, self.op.disk_template)
3255 try:
3256 self._DISK_CONVERSIONS[mode](self, feedback_fn)
3257 except:
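# if the conversion failed, release any DRBD minors that were reserved for
# the new disk layout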
3258 self.cfg.ReleaseDRBDMinors(instance.name)
3259 raise
3260 result.append(("disk_template", self.op.disk_template))
3261
3262 assert instance.disk_template == self.op.disk_template, \
3263 ("Expected disk template '%s', found '%s'" %
3264 (self.op.disk_template, instance.disk_template))
3265
3266 # Release node and resource locks if there are any (they might already have
3267 # been released during disk conversion)
3268 ReleaseLocks(self, locking.LEVEL_NODE)
3269 ReleaseLocks(self, locking.LEVEL_NODE_RES)
3270
3271 # Apply NIC changes
3272 if self._new_nics is not None:
3273 instance.nics = self._new_nics
3274 result.extend(self._nic_chgdesc)
3275
3276 # hvparams changes
3277 if self.op.hvparams:
3278 instance.hvparams = self.hv_inst
3279 for key, val in self.op.hvparams.iteritems():
3280 result.append(("hv/%s" % key, val))
3281
3282 # beparams changes
3283 if self.op.beparams:
3284 instance.beparams = self.be_inst
3285 for key, val in self.op.beparams.iteritems():
3286 result.append(("be/%s" % key, val))
3287
3288 # OS change
3289 if self.op.os_name:
3290 instance.os = self.op.os_name
3291
3292 # osparams changes
3293 if self.op.osparams:
3294 instance.osparams = self.os_inst
3295 for key, val in self.op.osparams.iteritems():
3296 result.append(("os/%s" % key, val))
3297
3298 if self.op.offline is None:
3299 # Ignore
3300 pass
3301 elif self.op.offline:
3302 # Mark instance as offline
3303 self.cfg.MarkInstanceOffline(instance.name)
3304 result.append(("admin_state", constants.ADMINST_OFFLINE))
3305 else:
3306 # Mark instance as online, but stopped
3307 self.cfg.MarkInstanceDown(instance.name)
3308 result.append(("admin_state", constants.ADMINST_DOWN))
3309
3310 self.cfg.Update(instance, feedback_fn, self.proc.GetECId())
3311
3312 assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
3313 self.owned_locks(locking.LEVEL_NODE)), \
3314 "All node locks should have been released by now"
3315
3316 return result
3317
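# maps (current disk template, requested disk template) to the method
# performing the conversion; looked up in Exec above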
3318 _DISK_CONVERSIONS = {
3319 (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
3320 (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
3321 }
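# A conversion is normally requested via the command line, for example (the
# exact option names may vary between Ganeti versions):
#   gnt-instance modify -t drbd -n NEW_SECONDARY_NODE INSTANCE
#   gnt-instance modify -t plain INSTANCE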
3322
3323
3324 class LUInstanceChangeGroup(LogicalUnit):
3325 HPATH = "instance-change-group"
3326 HTYPE = constants.HTYPE_INSTANCE
3327 REQ_BGL = False
3328
3329 def ExpandNames(self):
3330 self.share_locks = ShareAll()
3331
3332 self.needed_locks = {
3333 locking.LEVEL_NODEGROUP: [],
3334 locking.LEVEL_NODE: [],
3335 locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
3336 }
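# the node group and node locks depend on the instance's nodes, which are
# only known once the instance lock is held; they are therefore filled in
# DeclareLocks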
3337
3338 self._ExpandAndLockInstance()
3339
3340 if self.op.target_groups:
3341 self.req_target_uuids = map(self.cfg.LookupNodeGroup,
3342 self.op.target_groups)
3343 else:
3344 self.req_target_uuids = None
3345
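# an iallocator is needed to compute the new placement; if none was given
# explicitly, fall back to the cluster-wide default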
3346 self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)
3347
3348 def DeclareLocks(self, level):
3349 if level == locking.LEVEL_NODEGROUP:
3350 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
3351
3352 if self.req_target_uuids:
3353 lock_groups = set(self.req_target_uuids)
3354
3355 # Also lock all groups currently used by the instance; these are determined
3356 # before the node locks are held, so they must be re-verified in CheckPrereq
3357 instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_name)
3358 lock_groups.update(instance_groups)
3359 else:
3360 # No target groups requested, so every group is a potential target; lock them all
3361 lock_groups = locking.ALL_SET
3362
3363 self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups
3364
3365 elif level == locking.LEVEL_NODE:
3366 if self.req_target_uuids:
3367 # Lock all nodes used by the instance
3368 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
3369 self._LockInstancesNodes()
3370
3371 # Lock all nodes in all potential target groups
3372 lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
3373 self.cfg.GetInstanceNodeGroups(self.op.instance_name))
3374 member_nodes = [node_name
3375 for group in lock_groups
3376 for node_name in self.cfg.GetNodeGroup(group).members]
3377 self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
3378 else:
3379 # Lock all nodes as all groups are potential targets
3380 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3381
3382 def CheckPrereq(self):
3383 owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
3384 owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
3385 owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
3386
3387 assert (self.req_target_uuids is None or
3388 owned_groups.issuperset(self.req_target_uuids))
3389 assert owned_instances == set([self.op.instance_name])
3390
3391 # Get instance information
3392 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3393
3394 # Check if node groups for locked instance are still correct
3395 assert owned_nodes.issuperset(self.instance.all_nodes), \
3396 ("Instance %s's nodes changed while we kept the lock" %
3397 self.op.instance_name)
3398
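# re-verify the node groups that were locked optimistically in DeclareLocks
# and collect the groups currently used by the instance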
3399 inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_name,
3400 owned_groups)
3401
3402 if self.req_target_uuids:
3403 # User requested specific target groups
3404 self.target_uuids = frozenset(self.req_target_uuids)
3405 else:
3406 # All groups except those used by the instance are potential targets
3407 self.target_uuids = owned_groups - inst_groups
3408
3409 conflicting_groups = self.target_uuids & inst_groups
3410 if conflicting_groups:
3411 raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
3412 " used by the instance '%s'" %
3413 (utils.CommaJoin(conflicting_groups),
3414 self.op.instance_name),
3415 errors.ECODE_INVAL)
3416
3417 if not self.target_uuids:
3418 raise errors.OpPrereqError("There are no possible target groups",
3419 errors.ECODE_INVAL)
3420
3421 def BuildHooksEnv(self):
3422 """Build hooks env.