cmdlib: Cleanup public/private functions
[ganeti-github.git] / lib / cmdlib / instance.py
1 #
2 #
3
4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
5 #
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
10 #
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
15 #
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 # 02110-1301, USA.
20
21
22 """Logical units dealing with instances."""
23
24 import OpenSSL
25 import copy
26 import logging
27 import os
28
29 from ganeti import compat
30 from ganeti import constants
31 from ganeti import errors
32 from ganeti import ht
33 from ganeti import hypervisor
34 from ganeti import locking
35 from ganeti.masterd import iallocator
36 from ganeti import masterd
37 from ganeti import netutils
38 from ganeti import objects
39 from ganeti import opcodes
40 from ganeti import pathutils
41 from ganeti import rpc
42 from ganeti import utils
43
44 from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs
45
46 from ganeti.cmdlib.common import INSTANCE_DOWN, \
47 INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
48 ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
49 LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
50 IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
51 AnnotateDiskParams, GetUpdatedParams, ExpandInstanceName, \
52 ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeName
53 from ganeti.cmdlib.instance_storage import CreateDisks, \
54 CheckNodesFreeDiskPerVG, WipeDisks, WaitForSync, \
55 IsExclusiveStorageEnabledNodeName, CreateSingleBlockDev, ComputeDisks, \
56 CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
57 CreateBlockDev, StartInstanceDisks, ShutdownInstanceDisks, \
58 AssembleInstanceDisks
59 from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
60 GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
61 NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
62 ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
63 GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
64 CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS
65
66 import ganeti.masterd.instance
67
68
69 #: Type description for changes as returned by L{_ApplyContainerMods}'s
70 #: callbacks
71 _TApplyContModsCbChanges = \
72 ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
73 ht.TNonEmptyString,
74 ht.TAny,
75 ])))
76
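# Illustrative note (not part of the original module): the check above accepts
# either None or a list of (name, value) pairs, so a hypothetical callback
# result can be validated like this:
#
#   >>> _TApplyContModsCbChanges([("mode", constants.NIC_MODE_BRIDGED),
#   ...                           ("link", "br0")])
#   True
#   >>> _TApplyContModsCbChanges(None)
#   True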
77
78 def _CheckHostnameSane(lu, name):
79 """Ensures that a given hostname resolves to a 'sane' name.
80
81 The given name is required to be a prefix of the resolved hostname,
82 to prevent accidental mismatches.
83
84 @param lu: the logical unit on behalf of which we're checking
85 @param name: the name we should resolve and check
86 @return: the resolved hostname object
87
88 """
89 hostname = netutils.GetHostname(name=name)
90 if hostname.name != name:
91 lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
92 if not utils.MatchNameComponent(name, [hostname.name]):
93 raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
94 " same as given hostname '%s'") %
95 (hostname.name, name), errors.ECODE_INVAL)
96 return hostname
97
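# Example (illustrative, hypothetical names): if DNS resolves "web1" to
# "web1.example.com", the prefix check passes and the resolved hostname object
# is returned; had "web1" resolved to "mail.example.com" instead, an
# OpPrereqError would be raised.
#
#   >>> hostname = _CheckHostnameSane(lu, "web1")
#   >>> hostname.name
#   'web1.example.com'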
98
99 def _CheckOpportunisticLocking(op):
100 """Generate error if opportunistic locking is not possible.
101
102 """
103 if op.opportunistic_locking and not op.iallocator:
104 raise errors.OpPrereqError("Opportunistic locking is only available in"
105 " combination with an instance allocator",
106 errors.ECODE_INVAL)
107
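# Example (hypothetical opcode values): an instance-creation opcode with
# opportunistic_locking=True but iallocator=None fails this check, while the
# same opcode combined with an allocator such as "hail" passes.
#
#   >>> op.opportunistic_locking, op.iallocator = True, None
#   >>> _CheckOpportunisticLocking(op)      # raises OpPrereqError
#   >>> op.iallocator = "hail"
#   >>> _CheckOpportunisticLocking(op)      # no error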
108
109 def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_whitelist):
110 """Wrapper around IAReqInstanceAlloc.
111
112 @param op: The instance opcode
113 @param disks: The computed disks
114 @param nics: The computed nics
115 @param beparams: The fully filled beparams

116 @param node_whitelist: List of nodes which should appear as online to the
117 allocator (unless the node is already marked offline)
118
119 @returns: A filled L{iallocator.IAReqInstanceAlloc}
120
121 """
122 spindle_use = beparams[constants.BE_SPINDLE_USE]
123 return iallocator.IAReqInstanceAlloc(name=op.instance_name,
124 disk_template=op.disk_template,
125 tags=op.tags,
126 os=op.os_type,
127 vcpus=beparams[constants.BE_VCPUS],
128 memory=beparams[constants.BE_MAXMEM],
129 spindle_use=spindle_use,
130 disks=disks,
131 nics=[n.ToDict() for n in nics],
132 hypervisor=op.hypervisor,
133 node_whitelist=node_whitelist)
134
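# Illustrative sketch (hypothetical values): the opcode provides the name,
# disk template, tags, OS and hypervisor, while the filled beparams supply the
# resource figures:
#
#   >>> beparams = {constants.BE_VCPUS: 2, constants.BE_MAXMEM: 2048,
#   ...             constants.BE_SPINDLE_USE: 1}
#   >>> req = _CreateInstanceAllocRequest(op, disks, nics, beparams,
#   ...                                   node_whitelist=None)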
135
136 def _ComputeFullBeParams(op, cluster):
137 """Computes the full beparams.
138
139 @param op: The instance opcode
140 @param cluster: The cluster config object
141
142 @return: The fully filled beparams
143
144 """
145 default_beparams = cluster.beparams[constants.PP_DEFAULT]
146 for param, value in op.beparams.iteritems():
147 if value == constants.VALUE_AUTO:
148 op.beparams[param] = default_beparams[param]
149 objects.UpgradeBeParams(op.beparams)
150 utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
151 return cluster.SimpleFillBE(op.beparams)
152
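# Example (hypothetical values): "auto" entries are replaced by the cluster
# defaults before the dict is upgraded and type-checked; with a cluster
# default maxmem of 1024 this gives:
#
#   >>> op.beparams = {constants.BE_MAXMEM: constants.VALUE_AUTO,
#   ...                constants.BE_VCPUS: 4}
#   >>> be_full = _ComputeFullBeParams(op, cluster)
#   >>> be_full[constants.BE_MAXMEM], be_full[constants.BE_VCPUS]
#   (1024, 4)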
153
154 def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
155 """Computes the nics.
156
157 @param op: The instance opcode
158 @param cluster: Cluster configuration object
159 @param default_ip: The default ip to assign
160 @param cfg: An instance of the configuration object
161 @param ec_id: Execution context ID
162
163 @returns: The built-up NICs
164
165 """
166 nics = []
167 for nic in op.nics:
168 nic_mode_req = nic.get(constants.INIC_MODE, None)
169 nic_mode = nic_mode_req
170 if nic_mode is None or nic_mode == constants.VALUE_AUTO:
171 nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
172
173 net = nic.get(constants.INIC_NETWORK, None)
174 link = nic.get(constants.NIC_LINK, None)
175 ip = nic.get(constants.INIC_IP, None)
176
177 if net is None or net.lower() == constants.VALUE_NONE:
178 net = None
179 else:
180 if nic_mode_req is not None or link is not None:
181 raise errors.OpPrereqError("If network is given, no mode or link"
182 " is allowed to be passed",
183 errors.ECODE_INVAL)
184
185 # ip validity checks
186 if ip is None or ip.lower() == constants.VALUE_NONE:
187 nic_ip = None
188 elif ip.lower() == constants.VALUE_AUTO:
189 if not op.name_check:
190 raise errors.OpPrereqError("IP address set to auto but name checks"
191 " have been skipped",
192 errors.ECODE_INVAL)
193 nic_ip = default_ip
194 else:
195 # We defer pool operations until later, so that the iallocator has
196 # filled in the instance's node(s)
197 if ip.lower() == constants.NIC_IP_POOL:
198 if net is None:
199 raise errors.OpPrereqError("if ip=pool, parameter network"
200 " must be passed too",
201 errors.ECODE_INVAL)
202
203 elif not netutils.IPAddress.IsValid(ip):
204 raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
205 errors.ECODE_INVAL)
206
207 nic_ip = ip
208
209 # TODO: check the ip address for uniqueness
210 if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
211 raise errors.OpPrereqError("Routed nic mode requires an ip address",
212 errors.ECODE_INVAL)
213
214 # MAC address verification
215 mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
216 if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
217 mac = utils.NormalizeAndValidateMac(mac)
218
219 try:
220 # TODO: We need to factor this out
221 cfg.ReserveMAC(mac, ec_id)
222 except errors.ReservationError:
223 raise errors.OpPrereqError("MAC address %s already in use"
224 " in cluster" % mac,
225 errors.ECODE_NOTUNIQUE)
226
227 # Build nic parameters
228 nicparams = {}
229 if nic_mode_req:
230 nicparams[constants.NIC_MODE] = nic_mode
231 if link:
232 nicparams[constants.NIC_LINK] = link
233
234 check_params = cluster.SimpleFillNIC(nicparams)
235 objects.NIC.CheckParameterSyntax(check_params)
236 net_uuid = cfg.LookupNetwork(net)
237 name = nic.get(constants.INIC_NAME, None)
238 if name is not None and name.lower() == constants.VALUE_NONE:
239 name = None
240 nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
241 network=net_uuid, nicparams=nicparams)
242 nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
243 nics.append(nic_obj)
244
245 return nics
246
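# Example (hypothetical values): a NIC given only a mode and a link keeps the
# 'auto' MAC (it is generated later, in CheckPrereq) and gets no IP:
#
#   >>> op.nics = [{constants.INIC_MODE: constants.NIC_MODE_BRIDGED,
#   ...             constants.NIC_LINK: "br0"}]
#   >>> nics = _ComputeNics(op, cluster, None, cfg, ec_id)
#   >>> nics[0].mac == constants.VALUE_AUTO, nics[0].ip
#   (True, None)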
247
248 def _CheckForConflictingIp(lu, ip, node):
249 """Raise an error in case of a conflicting IP address.
250
251 @type ip: string
252 @param ip: IP address
253 @type node: string
254 @param node: node name
255
256 """
257 (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node)
258 if conf_net is not None:
259 raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
260 " network %s, but the target NIC does not." %
261 (ip, conf_net)),
262 errors.ECODE_STATE)
263
264 return (None, None)
265
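# Example (hypothetical values): the check returns (None, None) when the
# address does not belong to any network connected to the node's group, and
# raises OpPrereqError otherwise:
#
#   >>> _CheckForConflictingIp(lu, "198.51.100.10", "node1.example.com")
#   (None, None)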
266
267 def _ComputeIPolicyInstanceSpecViolation(
268 ipolicy, instance_spec, disk_template,
269 _compute_fn=ComputeIPolicySpecViolation):
270 """Compute whether the instance spec meets the specs of the ipolicy.
271
272 @type ipolicy: dict
273 @param ipolicy: The ipolicy to verify against
274 @type instance_spec: dict
275 @param instance_spec: The instance spec to verify
276 @type disk_template: string
277 @param disk_template: the disk template of the instance
278 @param _compute_fn: The function to verify ipolicy (unittest only)
279 @see: L{ComputeIPolicySpecViolation}
280
281 """
282 mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
283 cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
284 disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
285 disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
286 nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
287 spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)
288
289 return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
290 disk_sizes, spindle_use, disk_template)
291
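# Example (hypothetical values): the spec dict uses the ISPEC_* keys; missing
# keys fall back to None (or to 0/[] for the counts and sizes):
#
#   >>> spec = {constants.ISPEC_MEM_SIZE: 2048,
#   ...         constants.ISPEC_CPU_COUNT: 2,
#   ...         constants.ISPEC_DISK_COUNT: 1,
#   ...         constants.ISPEC_DISK_SIZE: [10240]}
#   >>> _ComputeIPolicyInstanceSpecViolation(ipolicy, spec, constants.DT_PLAIN)
#   []
#
# where the result is the list of violation messages (empty if compliant).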
292
293 def _CheckOSVariant(os_obj, name):
294 """Check whether an OS name conforms to the os variants specification.
295
296 @type os_obj: L{objects.OS}
297 @param os_obj: OS object to check
298 @type name: string
299 @param name: OS name passed by the user, to check for validity
300
301 """
302 variant = objects.OS.GetVariant(name)
303 if not os_obj.supported_variants:
304 if variant:
305 raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
306 " passed)" % (os_obj.name, variant),
307 errors.ECODE_INVAL)
308 return
309 if not variant:
310 raise errors.OpPrereqError("OS name must include a variant",
311 errors.ECODE_INVAL)
312
313 if variant not in os_obj.supported_variants:
314 raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
315
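# Example (assuming the usual "os+variant" naming): for an OS object whose
# supported_variants is ["default", "minimal"]:
#
#   >>> _CheckOSVariant(os_obj, "debootstrap+minimal")   # passes
#   >>> _CheckOSVariant(os_obj, "debootstrap")           # raises: no variant
#   >>> _CheckOSVariant(os_obj, "debootstrap+foo")       # raises: unsupported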
316
317 class LUInstanceCreate(LogicalUnit):
318 """Create an instance.
319
320 """
321 HPATH = "instance-add"
322 HTYPE = constants.HTYPE_INSTANCE
323 REQ_BGL = False
324
325 def CheckArguments(self):
326 """Check arguments.
327
328 """
329 # do not require name_check to ease forward/backward compatibility
330 # for tools
331 if self.op.no_install and self.op.start:
332 self.LogInfo("No-installation mode selected, disabling startup")
333 self.op.start = False
334 # validate/normalize the instance name
335 self.op.instance_name = \
336 netutils.Hostname.GetNormalizedName(self.op.instance_name)
337
338 if self.op.ip_check and not self.op.name_check:
339 # TODO: make the ip check more flexible and not depend on the name check
340 raise errors.OpPrereqError("Cannot do IP address check without a name"
341 " check", errors.ECODE_INVAL)
342
343 # check nics' parameter names
344 for nic in self.op.nics:
345 utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
346 # check that the NICs' parameter names are unique and valid
347 utils.ValidateDeviceNames("NIC", self.op.nics)
348
349 # check that the disks' names are unique and valid
350 utils.ValidateDeviceNames("disk", self.op.disks)
351
352 cluster = self.cfg.GetClusterInfo()
353 if not self.op.disk_template in cluster.enabled_disk_templates:
354 raise errors.OpPrereqError("Cannot create an instance with disk template"
355 " '%s', because it is not enabled in the"
356 " cluster. Enabled disk templates are: %s." %
357 (self.op.disk_template,
358 ",".join(cluster.enabled_disk_templates)), errors.ECODE_STATE)
359
360 # check disks: parameter names and consistent adopt/no-adopt strategy
361 has_adopt = has_no_adopt = False
362 for disk in self.op.disks:
363 if self.op.disk_template != constants.DT_EXT:
364 utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
365 if constants.IDISK_ADOPT in disk:
366 has_adopt = True
367 else:
368 has_no_adopt = True
369 if has_adopt and has_no_adopt:
370 raise errors.OpPrereqError("Either all disks are adopted or none is",
371 errors.ECODE_INVAL)
372 if has_adopt:
373 if self.op.disk_template not in constants.DTS_MAY_ADOPT:
374 raise errors.OpPrereqError("Disk adoption is not supported for the"
375 " '%s' disk template" %
376 self.op.disk_template,
377 errors.ECODE_INVAL)
378 if self.op.iallocator is not None:
379 raise errors.OpPrereqError("Disk adoption not allowed with an"
380 " iallocator script", errors.ECODE_INVAL)
381 if self.op.mode == constants.INSTANCE_IMPORT:
382 raise errors.OpPrereqError("Disk adoption not allowed for"
383 " instance import", errors.ECODE_INVAL)
384 else:
385 if self.op.disk_template in constants.DTS_MUST_ADOPT:
386 raise errors.OpPrereqError("Disk template %s requires disk adoption,"
387 " but no 'adopt' parameter given" %
388 self.op.disk_template,
389 errors.ECODE_INVAL)
390
391 self.adopt_disks = has_adopt
392
393 # instance name verification
394 if self.op.name_check:
395 self.hostname1 = _CheckHostnameSane(self, self.op.instance_name)
396 self.op.instance_name = self.hostname1.name
397 # used in CheckPrereq for ip ping check
398 self.check_ip = self.hostname1.ip
399 else:
400 self.check_ip = None
401
402 # file storage checks
403 if (self.op.file_driver and
404 not self.op.file_driver in constants.FILE_DRIVER):
405 raise errors.OpPrereqError("Invalid file driver name '%s'" %
406 self.op.file_driver, errors.ECODE_INVAL)
407
408 if self.op.disk_template == constants.DT_FILE:
409 opcodes.RequireFileStorage()
410 elif self.op.disk_template == constants.DT_SHARED_FILE:
411 opcodes.RequireSharedFileStorage()
412
413 ### Node/iallocator related checks
414 CheckIAllocatorOrNode(self, "iallocator", "pnode")
415
416 if self.op.pnode is not None:
417 if self.op.disk_template in constants.DTS_INT_MIRROR:
418 if self.op.snode is None:
419 raise errors.OpPrereqError("The networked disk templates need"
420 " a mirror node", errors.ECODE_INVAL)
421 elif self.op.snode:
422 self.LogWarning("Secondary node will be ignored on non-mirrored disk"
423 " template")
424 self.op.snode = None
425
426 _CheckOpportunisticLocking(self.op)
427
428 self._cds = GetClusterDomainSecret()
429
430 if self.op.mode == constants.INSTANCE_IMPORT:
431 # On import force_variant must be True, because if we forced it at
432 # initial install, our only chance when importing it back is that it
433 # works again!
434 self.op.force_variant = True
435
436 if self.op.no_install:
437 self.LogInfo("No-installation mode has no effect during import")
438
439 elif self.op.mode == constants.INSTANCE_CREATE:
440 if self.op.os_type is None:
441 raise errors.OpPrereqError("No guest OS specified",
442 errors.ECODE_INVAL)
443 if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
444 raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
445 " installation" % self.op.os_type,
446 errors.ECODE_STATE)
447 if self.op.disk_template is None:
448 raise errors.OpPrereqError("No disk template specified",
449 errors.ECODE_INVAL)
450
451 elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
452 # Check handshake to ensure both clusters have the same domain secret
453 src_handshake = self.op.source_handshake
454 if not src_handshake:
455 raise errors.OpPrereqError("Missing source handshake",
456 errors.ECODE_INVAL)
457
458 errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
459 src_handshake)
460 if errmsg:
461 raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
462 errors.ECODE_INVAL)
463
464 # Load and check source CA
465 self.source_x509_ca_pem = self.op.source_x509_ca
466 if not self.source_x509_ca_pem:
467 raise errors.OpPrereqError("Missing source X509 CA",
468 errors.ECODE_INVAL)
469
470 try:
471 (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
472 self._cds)
473 except OpenSSL.crypto.Error, err:
474 raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
475 (err, ), errors.ECODE_INVAL)
476
477 (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
478 if errcode is not None:
479 raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
480 errors.ECODE_INVAL)
481
482 self.source_x509_ca = cert
483
484 src_instance_name = self.op.source_instance_name
485 if not src_instance_name:
486 raise errors.OpPrereqError("Missing source instance name",
487 errors.ECODE_INVAL)
488
489 self.source_instance_name = \
490 netutils.GetHostname(name=src_instance_name).name
491
492 else:
493 raise errors.OpPrereqError("Invalid instance creation mode %r" %
494 self.op.mode, errors.ECODE_INVAL)
495
496 def ExpandNames(self):
497 """ExpandNames for CreateInstance.
498
499 Figure out the right locks for instance creation.
500
501 """
502 self.needed_locks = {}
503
504 instance_name = self.op.instance_name
505 # this is just a preventive check, but someone might still add this
506 # instance in the meantime, and creation will fail at lock-add time
507 if instance_name in self.cfg.GetInstanceList():
508 raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
509 instance_name, errors.ECODE_EXISTS)
510
511 self.add_locks[locking.LEVEL_INSTANCE] = instance_name
512
513 if self.op.iallocator:
514 # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
515 # specifying a group on instance creation and then selecting nodes from
516 # that group
517 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
518 self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
519
520 if self.op.opportunistic_locking:
521 self.opportunistic_locks[locking.LEVEL_NODE] = True
522 self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
523 else:
524 self.op.pnode = ExpandNodeName(self.cfg, self.op.pnode)
525 nodelist = [self.op.pnode]
526 if self.op.snode is not None:
527 self.op.snode = ExpandNodeName(self.cfg, self.op.snode)
528 nodelist.append(self.op.snode)
529 self.needed_locks[locking.LEVEL_NODE] = nodelist
530
531 # in case of import lock the source node too
532 if self.op.mode == constants.INSTANCE_IMPORT:
533 src_node = self.op.src_node
534 src_path = self.op.src_path
535
536 if src_path is None:
537 self.op.src_path = src_path = self.op.instance_name
538
539 if src_node is None:
540 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
541 self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
542 self.op.src_node = None
543 if os.path.isabs(src_path):
544 raise errors.OpPrereqError("Importing an instance from a path"
545 " requires a source node option",
546 errors.ECODE_INVAL)
547 else:
548 self.op.src_node = src_node = ExpandNodeName(self.cfg, src_node)
549 if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
550 self.needed_locks[locking.LEVEL_NODE].append(src_node)
551 if not os.path.isabs(src_path):
552 self.op.src_path = src_path = \
553 utils.PathJoin(pathutils.EXPORT_DIR, src_path)
554
555 self.needed_locks[locking.LEVEL_NODE_RES] = \
556 CopyLockList(self.needed_locks[locking.LEVEL_NODE])
557
558 def _RunAllocator(self):
559 """Run the allocator based on input opcode.
560
561 """
562 if self.op.opportunistic_locking:
563 # Only consider nodes for which a lock is held
564 node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
565 else:
566 node_whitelist = None
567
568 # TODO: Export network to iallocator so that it chooses a pnode
569 # in a nodegroup that has the desired network connected to it
570 req = _CreateInstanceAllocRequest(self.op, self.disks,
571 self.nics, self.be_full,
572 node_whitelist)
573 ial = iallocator.IAllocator(self.cfg, self.rpc, req)
574
575 ial.Run(self.op.iallocator)
576
577 if not ial.success:
578 # When opportunistic locks are used, only a temporary failure is generated
579 if self.op.opportunistic_locking:
580 ecode = errors.ECODE_TEMP_NORES
581 else:
582 ecode = errors.ECODE_NORES
583
584 raise errors.OpPrereqError("Can't compute nodes using"
585 " iallocator '%s': %s" %
586 (self.op.iallocator, ial.info),
587 ecode)
588
589 self.op.pnode = ial.result[0]
590 self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
591 self.op.instance_name, self.op.iallocator,
592 utils.CommaJoin(ial.result))
593
594 assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"
595
596 if req.RequiredNodes() == 2:
597 self.op.snode = ial.result[1]
598
599 def BuildHooksEnv(self):
600 """Build hooks env.
601
602 This runs on master, primary and secondary nodes of the instance.
603
604 """
605 env = {
606 "ADD_MODE": self.op.mode,
607 }
608 if self.op.mode == constants.INSTANCE_IMPORT:
609 env["SRC_NODE"] = self.op.src_node
610 env["SRC_PATH"] = self.op.src_path
611 env["SRC_IMAGES"] = self.src_images
612
613 env.update(BuildInstanceHookEnv(
614 name=self.op.instance_name,
615 primary_node=self.op.pnode,
616 secondary_nodes=self.secondaries,
617 status=self.op.start,
618 os_type=self.op.os_type,
619 minmem=self.be_full[constants.BE_MINMEM],
620 maxmem=self.be_full[constants.BE_MAXMEM],
621 vcpus=self.be_full[constants.BE_VCPUS],
622 nics=NICListToTuple(self, self.nics),
623 disk_template=self.op.disk_template,
624 disks=[(d[constants.IDISK_NAME], d[constants.IDISK_SIZE],
625 d[constants.IDISK_MODE]) for d in self.disks],
626 bep=self.be_full,
627 hvp=self.hv_full,
628 hypervisor_name=self.op.hypervisor,
629 tags=self.op.tags,
630 ))
631
632 return env
633
634 def BuildHooksNodes(self):
635 """Build hooks nodes.
636
637 """
638 nl = [self.cfg.GetMasterNode(), self.op.pnode] + self.secondaries
639 return nl, nl
640
641 def _ReadExportInfo(self):
642 """Reads the export information from disk.
643
644 It will override the opcode source node and path with the actual
645 information, if these two were not specified before.
646
647 @return: the export information
648
649 """
650 assert self.op.mode == constants.INSTANCE_IMPORT
651
652 src_node = self.op.src_node
653 src_path = self.op.src_path
654
655 if src_node is None:
656 locked_nodes = self.owned_locks(locking.LEVEL_NODE)
657 exp_list = self.rpc.call_export_list(locked_nodes)
658 found = False
659 for node in exp_list:
660 if exp_list[node].fail_msg:
661 continue
662 if src_path in exp_list[node].payload:
663 found = True
664 self.op.src_node = src_node = node
665 self.op.src_path = src_path = utils.PathJoin(pathutils.EXPORT_DIR,
666 src_path)
667 break
668 if not found:
669 raise errors.OpPrereqError("No export found for relative path %s" %
670 src_path, errors.ECODE_INVAL)
671
672 CheckNodeOnline(self, src_node)
673 result = self.rpc.call_export_info(src_node, src_path)
674 result.Raise("No export or invalid export found in dir %s" % src_path)
675
676 export_info = objects.SerializableConfigParser.Loads(str(result.payload))
677 if not export_info.has_section(constants.INISECT_EXP):
678 raise errors.ProgrammerError("Corrupted export config",
679 errors.ECODE_ENVIRON)
680
681 ei_version = export_info.get(constants.INISECT_EXP, "version")
682 if (int(ei_version) != constants.EXPORT_VERSION):
683 raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
684 (ei_version, constants.EXPORT_VERSION),
685 errors.ECODE_ENVIRON)
686 return export_info
687
688 def _ReadExportParams(self, einfo):
689 """Use export parameters as defaults.
690
691 If the opcode doesn't specify (i.e., override) some instance
692 parameters, try to take them from the export information, if
693 it declares them.
694
695 """
696 self.op.os_type = einfo.get(constants.INISECT_EXP, "os")
697
698 if self.op.disk_template is None:
699 if einfo.has_option(constants.INISECT_INS, "disk_template"):
700 self.op.disk_template = einfo.get(constants.INISECT_INS,
701 "disk_template")
702 if self.op.disk_template not in constants.DISK_TEMPLATES:
703 raise errors.OpPrereqError("Disk template specified in configuration"
704 " file is not one of the allowed values:"
705 " %s" %
706 " ".join(constants.DISK_TEMPLATES),
707 errors.ECODE_INVAL)
708 else:
709 raise errors.OpPrereqError("No disk template specified and the export"
710 " is missing the disk_template information",
711 errors.ECODE_INVAL)
712
713 if not self.op.disks:
714 disks = []
715 # TODO: import the disk iv_name too
716 for idx in range(constants.MAX_DISKS):
717 if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
718 disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
719 disks.append({constants.IDISK_SIZE: disk_sz})
720 self.op.disks = disks
721 if not disks and self.op.disk_template != constants.DT_DISKLESS:
722 raise errors.OpPrereqError("No disk info specified and the export"
723 " is missing the disk information",
724 errors.ECODE_INVAL)
725
726 if not self.op.nics:
727 nics = []
728 for idx in range(constants.MAX_NICS):
729 if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
730 ndict = {}
731 for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
732 v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
733 ndict[name] = v
734 nics.append(ndict)
735 else:
736 break
737 self.op.nics = nics
738
739 if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
740 self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()
741
742 if (self.op.hypervisor is None and
743 einfo.has_option(constants.INISECT_INS, "hypervisor")):
744 self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")
745
746 if einfo.has_section(constants.INISECT_HYP):
747 # use the export parameters but do not override the ones
748 # specified by the user
749 for name, value in einfo.items(constants.INISECT_HYP):
750 if name not in self.op.hvparams:
751 self.op.hvparams[name] = value
752
753 if einfo.has_section(constants.INISECT_BEP):
754 # use the parameters, without overriding
755 for name, value in einfo.items(constants.INISECT_BEP):
756 if name not in self.op.beparams:
757 self.op.beparams[name] = value
758 # Compatibility for the old "memory" be param
759 if name == constants.BE_MEMORY:
760 if constants.BE_MAXMEM not in self.op.beparams:
761 self.op.beparams[constants.BE_MAXMEM] = value
762 if constants.BE_MINMEM not in self.op.beparams:
763 self.op.beparams[constants.BE_MINMEM] = value
764 else:
765 # try to read the parameters old style, from the main section
766 for name in constants.BES_PARAMETERS:
767 if (name not in self.op.beparams and
768 einfo.has_option(constants.INISECT_INS, name)):
769 self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)
770
771 if einfo.has_section(constants.INISECT_OSP):
772 # use the parameters, without overriding
773 for name, value in einfo.items(constants.INISECT_OSP):
774 if name not in self.op.osparams:
775 self.op.osparams[name] = value
776
777 def _RevertToDefaults(self, cluster):
778 """Revert the instance parameters to the default values.
779
780 """
781 # hvparams
782 hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
783 for name in self.op.hvparams.keys():
784 if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
785 del self.op.hvparams[name]
786 # beparams
787 be_defs = cluster.SimpleFillBE({})
788 for name in self.op.beparams.keys():
789 if name in be_defs and be_defs[name] == self.op.beparams[name]:
790 del self.op.beparams[name]
791 # nic params
792 nic_defs = cluster.SimpleFillNIC({})
793 for nic in self.op.nics:
794 for name in constants.NICS_PARAMETERS:
795 if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
796 del nic[name]
797 # osparams
798 os_defs = cluster.SimpleFillOS(self.op.os_type, {})
799 for name in self.op.osparams.keys():
800 if name in os_defs and os_defs[name] == self.op.osparams[name]:
801 del self.op.osparams[name]
802
803 def _CalculateFileStorageDir(self):
804 """Calculate final instance file storage dir.
805
806 """
807 # file storage dir calculation/check
808 self.instance_file_storage_dir = None
809 if self.op.disk_template in constants.DTS_FILEBASED:
810 # build the full file storage dir path
811 joinargs = []
812
813 if self.op.disk_template == constants.DT_SHARED_FILE:
814 get_fsd_fn = self.cfg.GetSharedFileStorageDir
815 else:
816 get_fsd_fn = self.cfg.GetFileStorageDir
817
818 cfg_storagedir = get_fsd_fn()
819 if not cfg_storagedir:
820 raise errors.OpPrereqError("Cluster file storage dir not defined",
821 errors.ECODE_STATE)
822 joinargs.append(cfg_storagedir)
823
824 if self.op.file_storage_dir is not None:
825 joinargs.append(self.op.file_storage_dir)
826
827 joinargs.append(self.op.instance_name)
828
829 # pylint: disable=W0142
830 self.instance_file_storage_dir = utils.PathJoin(*joinargs)
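# Illustrative note (hypothetical paths): for a file-based instance named
# "inst1.example.com", a cluster file storage dir of /srv/ganeti/file-storage
# and no file_storage_dir override, this yields:
#
#   >>> self.instance_file_storage_dir
#   '/srv/ganeti/file-storage/inst1.example.com'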
831
832 def CheckPrereq(self): # pylint: disable=R0914
833 """Check prerequisites.
834
835 """
836 self._CalculateFileStorageDir()
837
838 if self.op.mode == constants.INSTANCE_IMPORT:
839 export_info = self._ReadExportInfo()
840 self._ReadExportParams(export_info)
841 self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
842 else:
843 self._old_instance_name = None
844
845 if (not self.cfg.GetVGName() and
846 self.op.disk_template not in constants.DTS_NOT_LVM):
847 raise errors.OpPrereqError("Cluster does not support lvm-based"
848 " instances", errors.ECODE_STATE)
849
850 if (self.op.hypervisor is None or
851 self.op.hypervisor == constants.VALUE_AUTO):
852 self.op.hypervisor = self.cfg.GetHypervisorType()
853
854 cluster = self.cfg.GetClusterInfo()
855 enabled_hvs = cluster.enabled_hypervisors
856 if self.op.hypervisor not in enabled_hvs:
857 raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
858 " cluster (%s)" %
859 (self.op.hypervisor, ",".join(enabled_hvs)),
860 errors.ECODE_STATE)
861
862 # Check tag validity
863 for tag in self.op.tags:
864 objects.TaggableObject.ValidateTag(tag)
865
866 # check hypervisor parameter syntax (locally)
867 utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
868 filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
869 self.op.hvparams)
870 hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
871 hv_type.CheckParameterSyntax(filled_hvp)
872 self.hv_full = filled_hvp
873 # check that we don't specify global parameters on an instance
874 CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
875 "instance", "cluster")
876
877 # fill and remember the beparams dict
878 self.be_full = _ComputeFullBeParams(self.op, cluster)
879
880 # build os parameters
881 self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)
882
883 # now that hvp/bep are in final format, let's reset to defaults,
884 # if told to do so
885 if self.op.identify_defaults:
886 self._RevertToDefaults(cluster)
887
888 # NIC buildup
889 self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
890 self.proc.GetECId())
891
892 # disk checks/pre-build
893 default_vg = self.cfg.GetVGName()
894 self.disks = ComputeDisks(self.op, default_vg)
895
896 if self.op.mode == constants.INSTANCE_IMPORT:
897 disk_images = []
898 for idx in range(len(self.disks)):
899 option = "disk%d_dump" % idx
900 if export_info.has_option(constants.INISECT_INS, option):
901 # FIXME: are the old os-es, disk sizes, etc. useful?
902 export_name = export_info.get(constants.INISECT_INS, option)
903 image = utils.PathJoin(self.op.src_path, export_name)
904 disk_images.append(image)
905 else:
906 disk_images.append(False)
907
908 self.src_images = disk_images
909
910 if self.op.instance_name == self._old_instance_name:
911 for idx, nic in enumerate(self.nics):
912 if nic.mac == constants.VALUE_AUTO:
913 nic_mac_ini = "nic%d_mac" % idx
914 nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
915
916 # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
917
918 # ip ping checks (we use the same ip that was resolved in ExpandNames)
919 if self.op.ip_check:
920 if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
921 raise errors.OpPrereqError("IP %s of instance %s already in use" %
922 (self.check_ip, self.op.instance_name),
923 errors.ECODE_NOTUNIQUE)
924
925 #### mac address generation
926 # By generating the MAC address here, both the allocator and the hooks get
927 # the real, final MAC address rather than the 'auto' or 'generate' value.
928 # There is a race condition between the generation and the instance object
929 # creation, which means that we know the mac is valid now, but we're not
930 # sure it will be when we actually add the instance. If things go bad
931 # adding the instance will abort because of a duplicate mac, and the
932 # creation job will fail.
933 for nic in self.nics:
934 if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
935 nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())
936
937 #### allocator run
938
939 if self.op.iallocator is not None:
940 self._RunAllocator()
941
942 # Release all unneeded node locks
943 keep_locks = filter(None, [self.op.pnode, self.op.snode, self.op.src_node])
944 ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
945 ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
946 ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
947
948 assert (self.owned_locks(locking.LEVEL_NODE) ==
949 self.owned_locks(locking.LEVEL_NODE_RES)), \
950 "Node locks differ from node resource locks"
951
952 #### node related checks
953
954 # check primary node
955 self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
956 assert self.pnode is not None, \
957 "Cannot retrieve locked node %s" % self.op.pnode
958 if pnode.offline:
959 raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
960 pnode.name, errors.ECODE_STATE)
961 if pnode.drained:
962 raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
963 pnode.name, errors.ECODE_STATE)
964 if not pnode.vm_capable:
965 raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
966 " '%s'" % pnode.name, errors.ECODE_STATE)
967
968 self.secondaries = []
969
970 # Fill in any IPs from IP pools. This must happen here, because we need to
971 # know the nic's primary node, as specified by the iallocator
972 for idx, nic in enumerate(self.nics):
973 net_uuid = nic.network
974 if net_uuid is not None:
975 nobj = self.cfg.GetNetwork(net_uuid)
976 netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.name)
977 if netparams is None:
978 raise errors.OpPrereqError("No netparams found for network"
979 " %s. Probably not connected to"
980 " node %s's nodegroup" %
981 (nobj.name, self.pnode.name),
982 errors.ECODE_INVAL)
983 self.LogInfo("NIC/%d inherits netparams %s" %
984 (idx, netparams.values()))
985 nic.nicparams = dict(netparams)
986 if nic.ip is not None:
987 if nic.ip.lower() == constants.NIC_IP_POOL:
988 try:
989 nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
990 except errors.ReservationError:
991 raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
992 " from the address pool" % idx,
993 errors.ECODE_STATE)
994 self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
995 else:
996 try:
997 self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId())
998 except errors.ReservationError:
999 raise errors.OpPrereqError("IP address %s already in use"
1000 " or does not belong to network %s" %
1001 (nic.ip, nobj.name),
1002 errors.ECODE_NOTUNIQUE)
1003
1004 # net is None; ip is either None or given
1005 elif self.op.conflicts_check:
1006 _CheckForConflictingIp(self, nic.ip, self.pnode.name)
1007
1008 # mirror node verification
1009 if self.op.disk_template in constants.DTS_INT_MIRROR:
1010 if self.op.snode == pnode.name:
1011 raise errors.OpPrereqError("The secondary node cannot be the"
1012 " primary node", errors.ECODE_INVAL)
1013 CheckNodeOnline(self, self.op.snode)
1014 CheckNodeNotDrained(self, self.op.snode)
1015 CheckNodeVmCapable(self, self.op.snode)
1016 self.secondaries.append(self.op.snode)
1017
1018 snode = self.cfg.GetNodeInfo(self.op.snode)
1019 if pnode.group != snode.group:
1020 self.LogWarning("The primary and secondary nodes are in two"
1021 " different node groups; the disk parameters"
1022 " from the first disk's node group will be"
1023 " used")
1024
1025 if not self.op.disk_template in constants.DTS_EXCL_STORAGE:
1026 nodes = [pnode]
1027 if self.op.disk_template in constants.DTS_INT_MIRROR:
1028 nodes.append(snode)
1029 has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
1030 if compat.any(map(has_es, nodes)):
1031 raise errors.OpPrereqError("Disk template %s not supported with"
1032 " exclusive storage" % self.op.disk_template,
1033 errors.ECODE_STATE)
1034
1035 nodenames = [pnode.name] + self.secondaries
1036
1037 if not self.adopt_disks:
1038 if self.op.disk_template == constants.DT_RBD:
1039 # _CheckRADOSFreeSpace() is just a placeholder.
1040 # Any function that checks prerequisites can be placed here.
1041 # Check if there is enough space on the RADOS cluster.
1042 CheckRADOSFreeSpace()
1043 elif self.op.disk_template == constants.DT_EXT:
1044 # FIXME: Function that checks prereqs if needed
1045 pass
1046 else:
1047 # Check lv size requirements, if not adopting
1048 req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
1049 CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)
1050
1051 elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
1052 all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
1053 disk[constants.IDISK_ADOPT])
1054 for disk in self.disks])
1055 if len(all_lvs) != len(self.disks):
1056 raise errors.OpPrereqError("Duplicate volume names given for adoption",
1057 errors.ECODE_INVAL)
1058 for lv_name in all_lvs:
1059 try:
1060 # FIXME: lv_name here is "vg/lv"; need to ensure that other calls
1061 # to ReserveLV use the same syntax
1062 self.cfg.ReserveLV(lv_name, self.proc.GetECId())
1063 except errors.ReservationError:
1064 raise errors.OpPrereqError("LV named %s used by another instance" %
1065 lv_name, errors.ECODE_NOTUNIQUE)
1066
1067 vg_names = self.rpc.call_vg_list([pnode.name])[pnode.name]
1068 vg_names.Raise("Cannot get VG information from node %s" % pnode.name)
1069
1070 node_lvs = self.rpc.call_lv_list([pnode.name],
1071 vg_names.payload.keys())[pnode.name]
1072 node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
1073 node_lvs = node_lvs.payload
1074
1075 delta = all_lvs.difference(node_lvs.keys())
1076 if delta:
1077 raise errors.OpPrereqError("Missing logical volume(s): %s" %
1078 utils.CommaJoin(delta),
1079 errors.ECODE_INVAL)
1080 online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
1081 if online_lvs:
1082 raise errors.OpPrereqError("Online logical volumes found, cannot"
1083 " adopt: %s" % utils.CommaJoin(online_lvs),
1084 errors.ECODE_STATE)
1085 # update the size of disk based on what is found
1086 for dsk in self.disks:
1087 dsk[constants.IDISK_SIZE] = \
1088 int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
1089 dsk[constants.IDISK_ADOPT])][0]))
1090
1091 elif self.op.disk_template == constants.DT_BLOCK:
1092 # Normalize and de-duplicate device paths
1093 all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
1094 for disk in self.disks])
1095 if len(all_disks) != len(self.disks):
1096 raise errors.OpPrereqError("Duplicate disk names given for adoption",
1097 errors.ECODE_INVAL)
1098 baddisks = [d for d in all_disks
1099 if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
1100 if baddisks:
1101 raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
1102 " cannot be adopted" %
1103 (utils.CommaJoin(baddisks),
1104 constants.ADOPTABLE_BLOCKDEV_ROOT),
1105 errors.ECODE_INVAL)
1106
1107 node_disks = self.rpc.call_bdev_sizes([pnode.name],
1108 list(all_disks))[pnode.name]
1109 node_disks.Raise("Cannot get block device information from node %s" %
1110 pnode.name)
1111 node_disks = node_disks.payload
1112 delta = all_disks.difference(node_disks.keys())
1113 if delta:
1114 raise errors.OpPrereqError("Missing block device(s): %s" %
1115 utils.CommaJoin(delta),
1116 errors.ECODE_INVAL)
1117 for dsk in self.disks:
1118 dsk[constants.IDISK_SIZE] = \
1119 int(float(node_disks[dsk[constants.IDISK_ADOPT]]))
1120
1121 # Verify instance specs
1122 spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
1123 ispec = {
1124 constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
1125 constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
1126 constants.ISPEC_DISK_COUNT: len(self.disks),
1127 constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
1128 for disk in self.disks],
1129 constants.ISPEC_NIC_COUNT: len(self.nics),
1130 constants.ISPEC_SPINDLE_USE: spindle_use,
1131 }
1132
1133 group_info = self.cfg.GetNodeGroup(pnode.group)
1134 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1135 res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
1136 self.op.disk_template)
1137 if not self.op.ignore_ipolicy and res:
1138 msg = ("Instance allocation to group %s (%s) violates policy: %s" %
1139 (pnode.group, group_info.name, utils.CommaJoin(res)))
1140 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
1141
1142 CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
1143
1144 CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
1145 # check OS parameters (remotely)
1146 CheckOSParams(self, True, nodenames, self.op.os_type, self.os_full)
1147
1148 CheckNicsBridgesExist(self, self.nics, self.pnode.name)
1149
1150 #TODO: _CheckExtParams (remotely)
1151 # Check parameters for extstorage
1152
1153 # memory check on primary node
1154 #TODO(dynmem): use MINMEM for checking
1155 if self.op.start:
1156 CheckNodeFreeMemory(self, self.pnode.name,
1157 "creating instance %s" % self.op.instance_name,
1158 self.be_full[constants.BE_MAXMEM],
1159 self.op.hypervisor)
1160
1161 self.dry_run_result = list(nodenames)
1162
1163 def Exec(self, feedback_fn):
1164 """Create and add the instance to the cluster.
1165
1166 """
1167 instance = self.op.instance_name
1168 pnode_name = self.pnode.name
1169
1170 assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
1171 self.owned_locks(locking.LEVEL_NODE)), \
1172 "Node locks differ from node resource locks"
1173 assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)
1174
1175 ht_kind = self.op.hypervisor
1176 if ht_kind in constants.HTS_REQ_PORT:
1177 network_port = self.cfg.AllocatePort()
1178 else:
1179 network_port = None
1180
1181 # This is ugly, but we have a chicken-and-egg problem here:
1182 # We can only take the group disk parameters, as the instance
1183 # has no disks yet (we are generating them right here).
1184 node = self.cfg.GetNodeInfo(pnode_name)
1185 nodegroup = self.cfg.GetNodeGroup(node.group)
1186 disks = GenerateDiskTemplate(self,
1187 self.op.disk_template,
1188 instance, pnode_name,
1189 self.secondaries,
1190 self.disks,
1191 self.instance_file_storage_dir,
1192 self.op.file_driver,
1193 0,
1194 feedback_fn,
1195 self.cfg.GetGroupDiskParams(nodegroup))
1196
1197 iobj = objects.Instance(name=instance, os=self.op.os_type,
1198 primary_node=pnode_name,
1199 nics=self.nics, disks=disks,
1200 disk_template=self.op.disk_template,
1201 admin_state=constants.ADMINST_DOWN,
1202 network_port=network_port,
1203 beparams=self.op.beparams,
1204 hvparams=self.op.hvparams,
1205 hypervisor=self.op.hypervisor,
1206 osparams=self.op.osparams,
1207 )
1208
1209 if self.op.tags:
1210 for tag in self.op.tags:
1211 iobj.AddTag(tag)
1212
1213 if self.adopt_disks:
1214 if self.op.disk_template == constants.DT_PLAIN:
1215 # rename LVs to the newly-generated names; we need to construct
1216 # 'fake' LV disks with the old data, plus the new unique_id
1217 tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
1218 rename_to = []
1219 for t_dsk, a_dsk in zip(tmp_disks, self.disks):
1220 rename_to.append(t_dsk.logical_id)
1221 t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
1222 self.cfg.SetDiskID(t_dsk, pnode_name)
1223 result = self.rpc.call_blockdev_rename(pnode_name,
1224 zip(tmp_disks, rename_to))
1225 result.Raise("Failed to rename adopted LVs")
1226 else:
1227 feedback_fn("* creating instance disks...")
1228 try:
1229 CreateDisks(self, iobj)
1230 except errors.OpExecError:
1231 self.LogWarning("Device creation failed")
1232 self.cfg.ReleaseDRBDMinors(instance)
1233 raise
1234
1235 feedback_fn("adding instance %s to cluster config" % instance)
1236
1237 self.cfg.AddInstance(iobj, self.proc.GetECId())
1238
1239 # Declare that we don't want to remove the instance lock anymore, as we've
1240 # added the instance to the config
1241 del self.remove_locks[locking.LEVEL_INSTANCE]
1242
1243 if self.op.mode == constants.INSTANCE_IMPORT:
1244 # Release unused nodes
1245 ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node])
1246 else:
1247 # Release all nodes
1248 ReleaseLocks(self, locking.LEVEL_NODE)
1249
1250 disk_abort = False
1251 if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
1252 feedback_fn("* wiping instance disks...")
1253 try:
1254 WipeDisks(self, iobj)
1255 except errors.OpExecError, err:
1256 logging.exception("Wiping disks failed")
1257 self.LogWarning("Wiping instance disks failed (%s)", err)
1258 disk_abort = True
1259
1260 if disk_abort:
1261 # Something is already wrong with the disks, don't do anything else
1262 pass
1263 elif self.op.wait_for_sync:
1264 disk_abort = not WaitForSync(self, iobj)
1265 elif iobj.disk_template in constants.DTS_INT_MIRROR:
1266 # make sure the disks are not degraded (still sync-ing is ok)
1267 feedback_fn("* checking mirrors status")
1268 disk_abort = not WaitForSync(self, iobj, oneshot=True)
1269 else:
1270 disk_abort = False
1271
1272 if disk_abort:
1273 RemoveDisks(self, iobj)
1274 self.cfg.RemoveInstance(iobj.name)
1275 # Make sure the instance lock gets removed
1276 self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
1277 raise errors.OpExecError("There are some degraded disks for"
1278 " this instance")
1279
1280 # Release all node resource locks
1281 ReleaseLocks(self, locking.LEVEL_NODE_RES)
1282
1283 if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
1284 # we need to set the disk IDs to the primary node, since the
1285 # preceding code might or might not have done it, depending on
1286 # disk template and other options
1287 for disk in iobj.disks:
1288 self.cfg.SetDiskID(disk, pnode_name)
1289 if self.op.mode == constants.INSTANCE_CREATE:
1290 if not self.op.no_install:
1291 pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
1292 not self.op.wait_for_sync)
1293 if pause_sync:
1294 feedback_fn("* pausing disk sync to install instance OS")
1295 result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
1296 (iobj.disks,
1297 iobj), True)
1298 for idx, success in enumerate(result.payload):
1299 if not success:
1300 logging.warn("pause-sync of instance %s for disk %d failed",
1301 instance, idx)
1302
1303 feedback_fn("* running the instance OS create scripts...")
1304 # FIXME: pass debug option from opcode to backend
1305 os_add_result = \
1306 self.rpc.call_instance_os_add(pnode_name, (iobj, None), False,
1307 self.op.debug_level)
1308 if pause_sync:
1309 feedback_fn("* resuming disk sync")
1310 result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
1311 (iobj.disks,
1312 iobj), False)
1313 for idx, success in enumerate(result.payload):
1314 if not success:
1315 logging.warn("resume-sync of instance %s for disk %d failed",
1316 instance, idx)
1317
1318 os_add_result.Raise("Could not add os for instance %s"
1319 " on node %s" % (instance, pnode_name))
1320
1321 else:
1322 if self.op.mode == constants.INSTANCE_IMPORT:
1323 feedback_fn("* running the instance OS import scripts...")
1324
1325 transfers = []
1326
1327 for idx, image in enumerate(self.src_images):
1328 if not image:
1329 continue
1330
1331 # FIXME: pass debug option from opcode to backend
1332 dt = masterd.instance.DiskTransfer("disk/%s" % idx,
1333 constants.IEIO_FILE, (image, ),
1334 constants.IEIO_SCRIPT,
1335 (iobj.disks[idx], idx),
1336 None)
1337 transfers.append(dt)
1338
1339 import_result = \
1340 masterd.instance.TransferInstanceData(self, feedback_fn,
1341 self.op.src_node, pnode_name,
1342 self.pnode.secondary_ip,
1343 iobj, transfers)
1344 if not compat.all(import_result):
1345 self.LogWarning("Some disks for instance %s on node %s were not"
1346 " imported successfully" % (instance, pnode_name))
1347
1348 rename_from = self._old_instance_name
1349
1350 elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
1351 feedback_fn("* preparing remote import...")
1352 # The source cluster will stop the instance before attempting to make
1353 # a connection. In some cases stopping an instance can take a long
1354 # time, hence the shutdown timeout is added to the connection
1355 # timeout.
1356 connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
1357 self.op.source_shutdown_timeout)
1358 timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
1359
1360 assert iobj.primary_node == self.pnode.name
1361 disk_results = \
1362 masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
1363 self.source_x509_ca,
1364 self._cds, timeouts)
1365 if not compat.all(disk_results):
1366 # TODO: Should the instance still be started, even if some disks
1367 # failed to import (valid for local imports, too)?
1368 self.LogWarning("Some disks for instance %s on node %s were not"
1369 " imported successfully" % (instance, pnode_name))
1370
1371 rename_from = self.source_instance_name
1372
1373 else:
1374 # also checked in the prereq part
1375 raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
1376 % self.op.mode)
1377
1378 # Run rename script on newly imported instance
1379 assert iobj.name == instance
1380 feedback_fn("Running rename script for %s" % instance)
1381 result = self.rpc.call_instance_run_rename(pnode_name, iobj,
1382 rename_from,
1383 self.op.debug_level)
1384 if result.fail_msg:
1385 self.LogWarning("Failed to run rename script for %s on node"
1386 " %s: %s" % (instance, pnode_name, result.fail_msg))
1387
1388 assert not self.owned_locks(locking.LEVEL_NODE_RES)
1389
1390 if self.op.start:
1391 iobj.admin_state = constants.ADMINST_UP
1392 self.cfg.Update(iobj, feedback_fn)
1393 logging.info("Starting instance %s on node %s", instance, pnode_name)
1394 feedback_fn("* starting instance...")
1395 result = self.rpc.call_instance_start(pnode_name, (iobj, None, None),
1396 False, self.op.reason)
1397 result.Raise("Could not start instance")
1398
1399 return list(iobj.all_nodes)
1400
1401
1402 class LUInstanceRename(LogicalUnit):
1403 """Rename an instance.
1404
1405 """
1406 HPATH = "instance-rename"
1407 HTYPE = constants.HTYPE_INSTANCE
1408
1409 def CheckArguments(self):
1410 """Check arguments.
1411
1412 """
1413 if self.op.ip_check and not self.op.name_check:
1414 # TODO: make the ip check more flexible and not depend on the name check
1415 raise errors.OpPrereqError("IP address check requires a name check",
1416 errors.ECODE_INVAL)
1417
1418 def BuildHooksEnv(self):
1419 """Build hooks env.
1420
1421 This runs on master, primary and secondary nodes of the instance.
1422
1423 """
1424 env = BuildInstanceHookEnvByObject(self, self.instance)
1425 env["INSTANCE_NEW_NAME"] = self.op.new_name
1426 return env
1427
1428 def BuildHooksNodes(self):
1429 """Build hooks nodes.
1430
1431 """
1432 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
1433 return (nl, nl)
1434
1435 def CheckPrereq(self):
1436 """Check prerequisites.
1437
1438 This checks that the instance is in the cluster and is not running.
1439
1440 """
1441 self.op.instance_name = ExpandInstanceName(self.cfg,
1442 self.op.instance_name)
1443 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
1444 assert instance is not None
1445 CheckNodeOnline(self, instance.primary_node)
1446 CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
1447 msg="cannot rename")
1448 self.instance = instance
1449
1450 new_name = self.op.new_name
1451 if self.op.name_check:
1452 hostname = _CheckHostnameSane(self, new_name)
1453 new_name = self.op.new_name = hostname.name
1454 if (self.op.ip_check and
1455 netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
1456 raise errors.OpPrereqError("IP %s of instance %s already in use" %
1457 (hostname.ip, new_name),
1458 errors.ECODE_NOTUNIQUE)
1459
1460 instance_list = self.cfg.GetInstanceList()
1461 if new_name in instance_list and new_name != instance.name:
1462 raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
1463 new_name, errors.ECODE_EXISTS)
1464
1465 def Exec(self, feedback_fn):
1466 """Rename the instance.
1467
1468 """
1469 inst = self.instance
1470 old_name = inst.name
1471
1472 rename_file_storage = False
1473 if (inst.disk_template in constants.DTS_FILEBASED and
1474 self.op.new_name != inst.name):
1475 old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
1476 rename_file_storage = True
1477
1478 self.cfg.RenameInstance(inst.name, self.op.new_name)
1479 # Change the instance lock. This is definitely safe while we hold the BGL.
1480 # Otherwise the new lock would have to be added in acquired mode.
1481 assert self.REQ_BGL
1482 assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
1483 self.glm.remove(locking.LEVEL_INSTANCE, old_name)
1484 self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
1485
1486 # re-read the instance from the configuration after rename
1487 inst = self.cfg.GetInstanceInfo(self.op.new_name)
1488
1489 if rename_file_storage:
1490 new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
1491 result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
1492 old_file_storage_dir,
1493 new_file_storage_dir)
1494 result.Raise("Could not rename on node %s directory '%s' to '%s'"
1495 " (but the instance has been renamed in Ganeti)" %
1496 (inst.primary_node, old_file_storage_dir,
1497 new_file_storage_dir))
1498
1499 StartInstanceDisks(self, inst, None)
1500 # update info on disks
1501 info = GetInstanceInfoText(inst)
1502 for (idx, disk) in enumerate(inst.disks):
1503 for node in inst.all_nodes:
1504 self.cfg.SetDiskID(disk, node)
1505 result = self.rpc.call_blockdev_setinfo(node, disk, info)
1506 if result.fail_msg:
1507 self.LogWarning("Error setting info on node %s for disk %s: %s",
1508 node, idx, result.fail_msg)
1509 try:
1510 result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
1511 old_name, self.op.debug_level)
1512 msg = result.fail_msg
1513 if msg:
1514 msg = ("Could not run OS rename script for instance %s on node %s"
1515 " (but the instance has been renamed in Ganeti): %s" %
1516 (inst.name, inst.primary_node, msg))
1517 self.LogWarning(msg)
1518 finally:
1519 ShutdownInstanceDisks(self, inst)
1520
1521 return inst.name
1522
1523
1524 class LUInstanceRemove(LogicalUnit):
1525 """Remove an instance.
1526
1527 """
1528 HPATH = "instance-remove"
1529 HTYPE = constants.HTYPE_INSTANCE
1530 REQ_BGL = False
1531
1532 def ExpandNames(self):
1533 self._ExpandAndLockInstance()
1534 self.needed_locks[locking.LEVEL_NODE] = []
1535 self.needed_locks[locking.LEVEL_NODE_RES] = []
1536 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
1537
1538 def DeclareLocks(self, level):
1539 if level == locking.LEVEL_NODE:
1540 self._LockInstancesNodes()
1541 elif level == locking.LEVEL_NODE_RES:
1542 # Copy node locks
1543 self.needed_locks[locking.LEVEL_NODE_RES] = \
1544 CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1545
1546 def BuildHooksEnv(self):
1547 """Build hooks env.
1548
1549 This runs on master, primary and secondary nodes of the instance.
1550
1551 """
1552 env = BuildInstanceHookEnvByObject(self, self.instance)
1553 env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
1554 return env
1555
1556 def BuildHooksNodes(self):
1557 """Build hooks nodes.
1558
1559 """
1560 nl = [self.cfg.GetMasterNode()]
1561 nl_post = list(self.instance.all_nodes) + nl
1562 return (nl, nl_post)
1563
1564 def CheckPrereq(self):
1565 """Check prerequisites.
1566
1567 This checks that the instance is in the cluster.
1568
1569 """
1570 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
1571 assert self.instance is not None, \
1572 "Cannot retrieve locked instance %s" % self.op.instance_name
1573
1574 def Exec(self, feedback_fn):
1575 """Remove the instance.
1576
1577 """
1578 instance = self.instance
1579 logging.info("Shutting down instance %s on node %s",
1580 instance.name, instance.primary_node)
1581
1582 result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
1583 self.op.shutdown_timeout,
1584 self.op.reason)
1585 msg = result.fail_msg
1586 if msg:
1587 if self.op.ignore_failures:
1588 feedback_fn("Warning: can't shutdown instance: %s" % msg)
1589 else:
1590 raise errors.OpExecError("Could not shutdown instance %s on"
1591 " node %s: %s" %
1592 (instance.name, instance.primary_node, msg))
1593
1594 assert (self.owned_locks(locking.LEVEL_NODE) ==
1595 self.owned_locks(locking.LEVEL_NODE_RES))
1596 assert not (set(instance.all_nodes) -
1597 self.owned_locks(locking.LEVEL_NODE)), \
1598 "Not owning correct locks"
1599
1600 RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)
1601
1602
1603 class LUInstanceMove(LogicalUnit):
1604 """Move an instance by data-copying.
1605
1606 """
1607 HPATH = "instance-move"
1608 HTYPE = constants.HTYPE_INSTANCE
1609 REQ_BGL = False
1610
1611 def ExpandNames(self):
1612 self._ExpandAndLockInstance()
1613 target_node = ExpandNodeName(self.cfg, self.op.target_node)
1614 self.op.target_node = target_node
1615 self.needed_locks[locking.LEVEL_NODE] = [target_node]
1616 self.needed_locks[locking.LEVEL_NODE_RES] = []
1617 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
1618
1619 def DeclareLocks(self, level):
1620 if level == locking.LEVEL_NODE:
1621 self._LockInstancesNodes(primary_only=True)
1622 elif level == locking.LEVEL_NODE_RES:
1623 # Copy node locks
1624 self.needed_locks[locking.LEVEL_NODE_RES] = \
1625 CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1626
1627 def BuildHooksEnv(self):
1628 """Build hooks env.
1629
1630 This runs on master, primary and secondary nodes of the instance.
1631
1632 """
1633 env = {
1634 "TARGET_NODE": self.op.target_node,
1635 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
1636 }
1637 env.update(BuildInstanceHookEnvByObject(self, self.instance))
1638 return env
1639
1640 def BuildHooksNodes(self):
1641 """Build hooks nodes.
1642
1643 """
1644 nl = [
1645 self.cfg.GetMasterNode(),
1646 self.instance.primary_node,
1647 self.op.target_node,
1648 ]
1649 return (nl, nl)
1650
1651 def CheckPrereq(self):
1652 """Check prerequisites.
1653
1654 This checks that the instance is in the cluster.
1655
1656 """
1657 self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
1658 assert self.instance is not None, \
1659 "Cannot retrieve locked instance %s" % self.op.instance_name
1660
1661 if instance.disk_template not in constants.DTS_COPYABLE:
1662 raise errors.OpPrereqError("Disk template %s not suitable for copying" %
1663 instance.disk_template, errors.ECODE_STATE)
1664
1665 node = self.cfg.GetNodeInfo(self.op.target_node)
1666 assert node is not None, \
1667 "Cannot retrieve locked node %s" % self.op.target_node
1668
1669 self.target_node = target_node = node.name
1670
1671 if target_node == instance.primary_node:
1672 raise errors.OpPrereqError("Instance %s is already on the node %s" %
1673 (instance.name, target_node),
1674 errors.ECODE_STATE)
1675
1676 bep = self.cfg.GetClusterInfo().FillBE(instance)
1677
1678 for idx, dsk in enumerate(instance.disks):
1679 if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
1680 raise errors.OpPrereqError("Instance disk %d has a complex layout,"
1681 " cannot copy" % idx, errors.ECODE_STATE)
1682
1683 CheckNodeOnline(self, target_node)
1684 CheckNodeNotDrained(self, target_node)
1685 CheckNodeVmCapable(self, target_node)
1686 cluster = self.cfg.GetClusterInfo()
1687 group_info = self.cfg.GetNodeGroup(node.group)
1688 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1689 CheckTargetNodeIPolicy(self, ipolicy, instance, node, self.cfg,
1690 ignore=self.op.ignore_ipolicy)
1691
1692 if instance.admin_state == constants.ADMINST_UP:
1693 # check memory requirements on the target node
1694 CheckNodeFreeMemory(self, target_node,
1695 "failing over instance %s" %
1696 instance.name, bep[constants.BE_MAXMEM],
1697 instance.hypervisor)
1698 else:
1699 self.LogInfo("Not checking memory on the secondary node as"
1700 " instance will not be started")
1701
1702 # check bridge existence
1703 CheckInstanceBridgesExist(self, instance, node=target_node)
1704
1705 def Exec(self, feedback_fn):
1706 """Move an instance.
1707
1708 The move is done by shutting it down on its present node, copying
1709 the data over (slow) and starting it on the new node.
1710
1711 """
1712 instance = self.instance
1713
1714 source_node = instance.primary_node
1715 target_node = self.target_node
1716
1717 self.LogInfo("Shutting down instance %s on source node %s",
1718 instance.name, source_node)
1719
1720 assert (self.owned_locks(locking.LEVEL_NODE) ==
1721 self.owned_locks(locking.LEVEL_NODE_RES))
1722
1723 result = self.rpc.call_instance_shutdown(source_node, instance,
1724 self.op.shutdown_timeout,
1725 self.op.reason)
1726 msg = result.fail_msg
1727 if msg:
1728 if self.op.ignore_consistency:
1729 self.LogWarning("Could not shutdown instance %s on node %s."
1730 " Proceeding anyway. Please make sure node"
1731 " %s is down. Error details: %s",
1732 instance.name, source_node, source_node, msg)
1733 else:
1734 raise errors.OpExecError("Could not shutdown instance %s on"
1735 " node %s: %s" %
1736 (instance.name, source_node, msg))
1737
1738 # create the target disks
1739 try:
1740 CreateDisks(self, instance, target_node=target_node)
1741 except errors.OpExecError:
1742 self.LogWarning("Device creation failed")
1743 self.cfg.ReleaseDRBDMinors(instance.name)
1744 raise
1745
1746 cluster_name = self.cfg.GetClusterInfo().cluster_name
1747
1748 errs = []
1749 # activate, get path, copy the data over
1750 for idx, disk in enumerate(instance.disks):
1751 self.LogInfo("Copying data for disk %d", idx)
1752 result = self.rpc.call_blockdev_assemble(target_node, (disk, instance),
1753 instance.name, True, idx)
1754 if result.fail_msg:
1755 self.LogWarning("Can't assemble newly created disk %d: %s",
1756 idx, result.fail_msg)
1757 errs.append(result.fail_msg)
1758 break
1759 dev_path = result.payload
1760 result = self.rpc.call_blockdev_export(source_node, (disk, instance),
1761 target_node, dev_path,
1762 cluster_name)
1763 if result.fail_msg:
1764 self.LogWarning("Can't copy data over for disk %d: %s",
1765 idx, result.fail_msg)
1766 errs.append(result.fail_msg)
1767 break
1768
1769 if errs:
1770 self.LogWarning("Some disks failed to copy, aborting")
1771 try:
1772 RemoveDisks(self, instance, target_node=target_node)
1773 finally:
1774 self.cfg.ReleaseDRBDMinors(instance.name)
1775 raise errors.OpExecError("Errors during disk copy: %s" %
1776 (",".join(errs),))
1777
1778 instance.primary_node = target_node
1779 self.cfg.Update(instance, feedback_fn)
1780
1781 self.LogInfo("Removing the disks on the original node")
1782 RemoveDisks(self, instance, target_node=source_node)
1783
1784 # Only start the instance if it's marked as up
1785 if instance.admin_state == constants.ADMINST_UP:
1786 self.LogInfo("Starting instance %s on node %s",
1787 instance.name, target_node)
1788
1789 disks_ok, _ = AssembleInstanceDisks(self, instance,
1790 ignore_secondaries=True)
1791 if not disks_ok:
1792 ShutdownInstanceDisks(self, instance)
1793 raise errors.OpExecError("Can't activate the instance's disks")
1794
1795 result = self.rpc.call_instance_start(target_node,
1796 (instance, None, None), False,
1797 self.op.reason)
1798 msg = result.fail_msg
1799 if msg:
1800 ShutdownInstanceDisks(self, instance)
1801 raise errors.OpExecError("Could not start instance %s on node %s: %s" %
1802 (instance.name, target_node, msg))
1803
1804
1805 class LUInstanceMultiAlloc(NoHooksLU):
1806 """Allocates multiple instances at the same time.
1807
1808 """
1809 REQ_BGL = False
1810
1811 def CheckArguments(self):
1812 """Check arguments.
1813
1814 """
1815 nodes = []
1816 for inst in self.op.instances:
1817 if inst.iallocator is not None:
1818 raise errors.OpPrereqError("iallocator are not allowed to be set on"
1819 " instance objects", errors.ECODE_INVAL)
1820 nodes.append(bool(inst.pnode))
1821 if inst.disk_template in constants.DTS_INT_MIRROR:
1822 nodes.append(bool(inst.snode))
1823
1824 has_nodes = compat.any(nodes)
1825 if compat.all(nodes) ^ has_nodes:
1826 raise errors.OpPrereqError("There are instance objects providing"
1827 " pnode/snode while others do not",
1828 errors.ECODE_INVAL)
1829
1830 if self.op.iallocator is None:
1831 default_iallocator = self.cfg.GetDefaultIAllocator()
1832 if default_iallocator and has_nodes:
1833 self.op.iallocator = default_iallocator
1834 else:
1835 raise errors.OpPrereqError("No iallocator or nodes on the instances"
1836 " given and no cluster-wide default"
1837 " iallocator found; please specify either"
1838 " an iallocator or nodes on the instances"
1839 " or set a cluster-wide default iallocator",
1840 errors.ECODE_INVAL)
1841
1842 _CheckOpportunisticLocking(self.op)
1843
1844 dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
1845 if dups:
1846 raise errors.OpPrereqError("There are duplicate instance names: %s" %
1847 utils.CommaJoin(dups), errors.ECODE_INVAL)
1848
1849 def ExpandNames(self):
1850 """Calculate the locks.
1851
1852 """
1853 self.share_locks = ShareAll()
1854 self.needed_locks = {
1855 # The iallocator will select the nodes; even if no iallocator is used,
1856 # collisions with LUInstanceCreate should be avoided
1857 locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
1858 }
1859
1860 if self.op.iallocator:
1861 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
1862 self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
1863
1864 if self.op.opportunistic_locking:
1865 self.opportunistic_locks[locking.LEVEL_NODE] = True
1866 self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
1867 else:
1868 nodeslist = []
1869 for inst in self.op.instances:
1870 inst.pnode = ExpandNodeName(self.cfg, inst.pnode)
1871 nodeslist.append(inst.pnode)
1872 if inst.snode is not None:
1873 inst.snode = ExpandNodeName(self.cfg, inst.snode)
1874 nodeslist.append(inst.snode)
1875
1876 self.needed_locks[locking.LEVEL_NODE] = nodeslist
1877 # Lock resources of instance's primary and secondary nodes (copy to
1878 # prevent accidential modification)
1879 self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)
1880
1881 def CheckPrereq(self):
1882 """Check prerequisite.
1883
1884 """
1885 cluster = self.cfg.GetClusterInfo()
1886 default_vg = self.cfg.GetVGName()
1887 ec_id = self.proc.GetECId()
1888
1889 if self.op.opportunistic_locking:
1890 # Only consider nodes for which a lock is held
1891 node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
1892 else:
1893 node_whitelist = None
1894
1895 insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
1896 _ComputeNics(op, cluster, None,
1897 self.cfg, ec_id),
1898 _ComputeFullBeParams(op, cluster),
1899 node_whitelist)
1900 for op in self.op.instances]
1901
1902 req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
1903 ial = iallocator.IAllocator(self.cfg, self.rpc, req)
1904
1905 ial.Run(self.op.iallocator)
1906
1907 if not ial.success:
1908 raise errors.OpPrereqError("Can't compute nodes using"
1909 " iallocator '%s': %s" %
1910 (self.op.iallocator, ial.info),
1911 errors.ECODE_NORES)
1912
1913 self.ia_result = ial.result
1914
1915 if self.op.dry_run:
1916 self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
1917 constants.JOB_IDS_KEY: [],
1918 })
1919
1920 def _ConstructPartialResult(self):
1921 """Contructs the partial result.
1922
1923 """
1924 (allocatable, failed) = self.ia_result
1925 return {
1926 opcodes.OpInstanceMultiAlloc.ALLOCATABLE_KEY:
1927 map(compat.fst, allocatable),
1928 opcodes.OpInstanceMultiAlloc.FAILED_KEY: failed,
1929 }
1930
1931 def Exec(self, feedback_fn):
1932 """Executes the opcode.
1933
1934 """
1935 op2inst = dict((op.instance_name, op) for op in self.op.instances)
1936 (allocatable, failed) = self.ia_result
1937
1938 jobs = []
1939 for (name, nodes) in allocatable:
1940 op = op2inst.pop(name)
1941
1942 if len(nodes) > 1:
1943 (op.pnode, op.snode) = nodes
1944 else:
1945 (op.pnode,) = nodes
1946
1947 jobs.append([op])
1948
1949 missing = set(op2inst.keys()) - set(failed)
1950 assert not missing, \
1951 "Iallocator did return incomplete result: %s" % utils.CommaJoin(missing)
1952
1953 return ResultWithJobs(jobs, **self._ConstructPartialResult())
1954
1955
1956 class _InstNicModPrivate:
1957 """Data structure for network interface modifications.
1958
1959 Used by L{LUInstanceSetParams}.
1960
1961 """
1962 def __init__(self):
1963 self.params = None
1964 self.filled = None
1965
1966
1967 def _PrepareContainerMods(mods, private_fn):
1968 """Prepares a list of container modifications by adding a private data field.
1969
1970 @type mods: list of tuples; (operation, index, parameters)
1971 @param mods: List of modifications
1972 @type private_fn: callable or None
1973 @param private_fn: Callable for constructing a private data field for a
1974 modification
1975 @rtype: list
1976
1977 """
1978 if private_fn is None:
1979 fn = lambda: None
1980 else:
1981 fn = private_fn
1982
1983 return [(op, idx, params, fn()) for (op, idx, params) in mods]
1984
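# For illustration only (values hypothetical): a modification list such as
#   [(constants.DDM_ADD, -1, {constants.IDISK_SIZE: 1024})]
# comes back from _PrepareContainerMods(mods, None) as
#   [(constants.DDM_ADD, -1, {constants.IDISK_SIZE: 1024}, None)];
# passing _InstNicModPrivate as private_fn attaches a fresh private object
# to every entry instead of None.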
1985
1986 def _CheckNodesPhysicalCPUs(lu, nodenames, requested, hypervisor_name):
1987 """Checks if nodes have enough physical CPUs
1988
1989 This function checks if all given nodes have the needed number of
1990 physical CPUs. In case any node has less CPUs or we cannot get the
1991 information from the node, this function raises an OpPrereqError
1992 exception.
1993
1994 @type lu: C{LogicalUnit}
1995 @param lu: a logical unit from which we get configuration data
1996 @type nodenames: C{list}
1997 @param nodenames: the list of node names to check
1998 @type requested: C{int}
1999 @param requested: the minimum acceptable number of physical CPUs
2000 @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
2001 or we cannot check the node
2002
2003 """
2004 nodeinfo = lu.rpc.call_node_info(nodenames, None, [hypervisor_name], None)
2005 for node in nodenames:
2006 info = nodeinfo[node]
2007 info.Raise("Cannot get current information from node %s" % node,
2008 prereq=True, ecode=errors.ECODE_ENVIRON)
2009 (_, _, (hv_info, )) = info.payload
2010 num_cpus = hv_info.get("cpu_total", None)
2011 if not isinstance(num_cpus, int):
2012 raise errors.OpPrereqError("Can't compute the number of physical CPUs"
2013 " on node %s, result was '%s'" %
2014 (node, num_cpus), errors.ECODE_ENVIRON)
2015 if requested > num_cpus:
2016 raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
2017 "required" % (node, num_cpus, requested),
2018 errors.ECODE_NORES)
2019
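# A sketch of the intended use (mirroring the CPU-mask check further below):
#   _CheckNodesPhysicalCPUs(self, instance.all_nodes,
#                           max_requested_cpu + 1, instance.hypervisor)
# requires every node to report a "cpu_total" of at least the highest CPU
# number named in the mask plus one.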
2020
2021 def GetItemFromContainer(identifier, kind, container):
2022 """Return the item refered by the identifier.
2023
2024 @type identifier: string
2025 @param identifier: Item index or name or UUID
2026 @type kind: string
2027 @param kind: One-word item description
2028 @type container: list
2029 @param container: Container to get the item from
2030
2031 """
2032 # Index
2033 try:
2034 idx = int(identifier)
2035 if idx == -1:
2036 # -1 addresses the last item
2037 absidx = len(container) - 1
2038 elif idx < 0:
2039 raise IndexError("Not accepting negative indices other than -1")
2040 elif idx > len(container):
2041 raise IndexError("Got %s index %s, but there are only %s" %
2042 (kind, idx, len(container)))
2043 else:
2044 absidx = idx
2045 return (absidx, container[idx])
2046 except ValueError:
2047 pass
2048
2049 for idx, item in enumerate(container):
2050 if item.uuid == identifier or item.name == identifier:
2051 return (idx, item)
2052
2053 raise errors.OpPrereqError("Cannot find %s with identifier %s" %
2054 (kind, identifier), errors.ECODE_NOENT)
2055
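# For illustration, assuming a container of NIC objects: "1" addresses the
# item by index, "-1" the last item, and any other string is matched against
# name/UUID, so GetItemFromContainer("-1", "nic", instance.nics) would return
# (len(instance.nics) - 1, instance.nics[-1]).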
2056
2057 def _ApplyContainerMods(kind, container, chgdesc, mods,
2058 create_fn, modify_fn, remove_fn):
2059 """Applies descriptions in C{mods} to C{container}.
2060
2061 @type kind: string
2062 @param kind: One-word item description
2063 @type container: list
2064 @param container: Container to modify
2065 @type chgdesc: None or list
2066 @param chgdesc: List of applied changes
2067 @type mods: list
2068 @param mods: Modifications as returned by L{_PrepareContainerMods}
2069 @type create_fn: callable
2070 @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
2071 receives absolute item index, parameters and private data object as added
2072 by L{_PrepareContainerMods}, returns tuple containing new item and changes
2073 as list
2074 @type modify_fn: callable
2075 @param modify_fn: Callback for modifying an existing item
2076 (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
2077 and private data object as added by L{_PrepareContainerMods}, returns
2078 changes as list
2079 @type remove_fn: callable
2080 @param remove_fn: Callback on removing item; receives absolute item index,
2081 item and private data object as added by L{_PrepareContainerMods}
2082
2083 """
2084 for (op, identifier, params, private) in mods:
2085 changes = None
2086
2087 if op == constants.DDM_ADD:
2088 # Calculate where item will be added
2089 # When adding an item, identifier can only be an index
2090 try:
2091 idx = int(identifier)
2092 except ValueError:
2093 raise errors.OpPrereqError("Only possitive integer or -1 is accepted as"
2094 " identifier for %s" % constants.DDM_ADD,
2095 errors.ECODE_INVAL)
2096 if idx == -1:
2097 addidx = len(container)
2098 else:
2099 if idx < 0:
2100 raise IndexError("Not accepting negative indices other than -1")
2101 elif idx > len(container):
2102 raise IndexError("Got %s index %s, but there are only %s" %
2103 (kind, idx, len(container)))
2104 addidx = idx
2105
2106 if create_fn is None:
2107 item = params
2108 else:
2109 (item, changes) = create_fn(addidx, params, private)
2110
2111 if idx == -1:
2112 container.append(item)
2113 else:
2114 assert idx >= 0
2115 assert idx <= len(container)
2116 # list.insert does so before the specified index
2117 container.insert(idx, item)
2118 else:
2119 # Retrieve existing item
2120 (absidx, item) = GetItemFromContainer(identifier, kind, container)
2121
2122 if op == constants.DDM_REMOVE:
2123 assert not params
2124
2125 if remove_fn is not None:
2126 remove_fn(absidx, item, private)
2127
2128 changes = [("%s/%s" % (kind, absidx), "remove")]
2129
2130 assert container[absidx] == item
2131 del container[absidx]
2132 elif op == constants.DDM_MODIFY:
2133 if modify_fn is not None:
2134 changes = modify_fn(absidx, item, params, private)
2135 else:
2136 raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2137
2138 assert _TApplyContModsCbChanges(changes)
2139
2140 if not (chgdesc is None or changes is None):
2141 chgdesc.extend(changes)
2142
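# A minimal sketch of how the callbacks cooperate (using the hooks defined in
# LUInstanceSetParams below): a call like
#   _ApplyContainerMods("disk", disks, chgdesc,
#                       [(constants.DDM_ADD, -1, params, private)],
#                       self._CreateNewDisk, self._ModifyDisk, self._RemoveDisk)
# appends the disk built by _CreateNewDisk and records its change list in
# chgdesc, while DDM_MODIFY/DDM_REMOVE entries dispatch to the other two hooks.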
2143
2144 def _UpdateIvNames(base_index, disks):
2145 """Updates the C{iv_name} attribute of disks.
2146
2147 @type disks: list of L{objects.Disk}
2148
2149 """
2150 for (idx, disk) in enumerate(disks):
2151 disk.iv_name = "disk/%s" % (base_index + idx, )
2152
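# E.g. _UpdateIvNames(0, instance.disks) relabels all disks as "disk/0",
# "disk/1", ... which is how the drbd-to-plain conversion below resets the
# names after swapping the disk objects.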
2153
2154 class LUInstanceSetParams(LogicalUnit):
2155 """Modifies an instances's parameters.
2156
2157 """
2158 HPATH = "instance-modify"
2159 HTYPE = constants.HTYPE_INSTANCE
2160 REQ_BGL = False
2161
2162 @staticmethod
2163 def _UpgradeDiskNicMods(kind, mods, verify_fn):
2164 assert ht.TList(mods)
2165 assert not mods or len(mods[0]) in (2, 3)
2166
2167 if mods and len(mods[0]) == 2:
2168 result = []
2169
2170 addremove = 0
2171 for op, params in mods:
2172 if op in (constants.DDM_ADD, constants.DDM_REMOVE):
2173 result.append((op, -1, params))
2174 addremove += 1
2175
2176 if addremove > 1:
2177 raise errors.OpPrereqError("Only one %s add or remove operation is"
2178 " supported at a time" % kind,
2179 errors.ECODE_INVAL)
2180 else:
2181 result.append((constants.DDM_MODIFY, op, params))
2182
2183 assert verify_fn(result)
2184 else:
2185 result = mods
2186
2187 return result
2188
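# Illustrative only: legacy 2-tuple modifications are upgraded in place,
# e.g. [(constants.DDM_ADD, {...})] becomes [(constants.DDM_ADD, -1, {...})]
# and [("2", {...})] becomes [(constants.DDM_MODIFY, "2", {...})]; at most
# one add/remove entry is accepted per call.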
2189 @staticmethod
2190 def _CheckMods(kind, mods, key_types, item_fn):
2191 """Ensures requested disk/NIC modifications are valid.
2192
2193 """
2194 for (op, _, params) in mods:
2195 assert ht.TDict(params)
2196
2197 # If 'key_types' is an empty dict, we assume we have an
2198 # 'ext' template and thus do not ForceDictType
2199 if key_types:
2200 utils.ForceDictType(params, key_types)
2201
2202 if op == constants.DDM_REMOVE:
2203 if params:
2204 raise errors.OpPrereqError("No settings should be passed when"
2205 " removing a %s" % kind,
2206 errors.ECODE_INVAL)
2207 elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
2208 item_fn(op, params)
2209 else:
2210 raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2211
2212 @staticmethod
2213 def _VerifyDiskModification(op, params):
2214 """Verifies a disk modification.
2215
2216 """
2217 if op == constants.DDM_ADD:
2218 mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
2219 if mode not in constants.DISK_ACCESS_SET:
2220 raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
2221 errors.ECODE_INVAL)
2222
2223 size = params.get(constants.IDISK_SIZE, None)
2224 if size is None:
2225 raise errors.OpPrereqError("Required disk parameter '%s' missing" %
2226 constants.IDISK_SIZE, errors.ECODE_INVAL)
2227
2228 try:
2229 size = int(size)
2230 except (TypeError, ValueError), err:
2231 raise errors.OpPrereqError("Invalid disk size parameter: %s" % err,
2232 errors.ECODE_INVAL)
2233
2234 params[constants.IDISK_SIZE] = size
2235 name = params.get(constants.IDISK_NAME, None)
2236 if name is not None and name.lower() == constants.VALUE_NONE:
2237 params[constants.IDISK_NAME] = None
2238
2239 elif op == constants.DDM_MODIFY:
2240 if constants.IDISK_SIZE in params:
2241 raise errors.OpPrereqError("Disk size change not possible, use"
2242 " grow-disk", errors.ECODE_INVAL)
2243 if len(params) > 2:
2244 raise errors.OpPrereqError("Disk modification doesn't support"
2245 " additional arbitrary parameters",
2246 errors.ECODE_INVAL)
2247 name = params.get(constants.IDISK_NAME, None)
2248 if name is not None and name.lower() == constants.VALUE_NONE:
2249 params[constants.IDISK_NAME] = None
2250
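# For illustration (hypothetical values): an add entry such as
#   {constants.IDISK_SIZE: "1024"}
# is normalized here to include constants.IDISK_MODE (constants.DISK_RDWR)
# and an integer size, while a modify entry carrying constants.IDISK_SIZE is
# rejected because resizing must go through grow-disk.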
2251 @staticmethod
2252 def _VerifyNicModification(op, params):
2253 """Verifies a network interface modification.
2254
2255 """
2256 if op in (constants.DDM_ADD, constants.DDM_MODIFY):
2257 ip = params.get(constants.INIC_IP, None)
2258 name = params.get(constants.INIC_NAME, None)
2259 req_net = params.get(constants.INIC_NETWORK, None)
2260 link = params.get(constants.NIC_LINK, None)
2261 mode = params.get(constants.NIC_MODE, None)
2262 if name is not None and name.lower() == constants.VALUE_NONE:
2263 params[constants.INIC_NAME] = None
2264 if req_net is not None:
2265 if req_net.lower() == constants.VALUE_NONE:
2266 params[constants.INIC_NETWORK] = None
2267 req_net = None
2268 elif link is not None or mode is not None:
2269 raise errors.OpPrereqError("If network is given"
2270 " mode or link should not",
2271 errors.ECODE_INVAL)
2272
2273 if op == constants.DDM_ADD:
2274 macaddr = params.get(constants.INIC_MAC, None)
2275 if macaddr is None:
2276 params[constants.INIC_MAC] = constants.VALUE_AUTO
2277
2278 if ip is not None:
2279 if ip.lower() == constants.VALUE_NONE:
2280 params[constants.INIC_IP] = None
2281 else:
2282 if ip.lower() == constants.NIC_IP_POOL:
2283 if op == constants.DDM_ADD and req_net is None:
2284 raise errors.OpPrereqError("If ip=pool, parameter network"
2285 " cannot be none",
2286 errors.ECODE_INVAL)
2287 else:
2288 if not netutils.IPAddress.IsValid(ip):
2289 raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
2290 errors.ECODE_INVAL)
2291
2292 if constants.INIC_MAC in params:
2293 macaddr = params[constants.INIC_MAC]
2294 if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2295 macaddr = utils.NormalizeAndValidateMac(macaddr)
2296
2297 if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
2298 raise errors.OpPrereqError("'auto' is not a valid MAC address when"
2299 " modifying an existing NIC",
2300 errors.ECODE_INVAL)
2301
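# For illustration (hypothetical values): an add entry like
#   {constants.INIC_IP: "none"}
# ends up as {constants.INIC_IP: None, constants.INIC_MAC: constants.VALUE_AUTO},
# ip="pool" is only accepted together with a network, and giving a network
# alongside link/mode is rejected above.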
2302 def CheckArguments(self):
2303 if not (self.op.nics or self.op.disks or self.op.disk_template or
2304 self.op.hvparams or self.op.beparams or self.op.os_name or
2305 self.op.offline is not None or self.op.runtime_mem or
2306 self.op.pnode):
2307 raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
2308
2309 if self.op.hvparams:
2310 CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
2311 "hypervisor", "instance", "cluster")
2312
2313 self.op.disks = self._UpgradeDiskNicMods(
2314 "disk", self.op.disks, opcodes.OpInstanceSetParams.TestDiskModifications)
2315 self.op.nics = self._UpgradeDiskNicMods(
2316 "NIC", self.op.nics, opcodes.OpInstanceSetParams.TestNicModifications)
2317
2318 if self.op.disks and self.op.disk_template is not None:
2319 raise errors.OpPrereqError("Disk template conversion and other disk"
2320 " changes not supported at the same time",
2321 errors.ECODE_INVAL)
2322
2323 if (self.op.disk_template and
2324 self.op.disk_template in constants.DTS_INT_MIRROR and
2325 self.op.remote_node is None):
2326 raise errors.OpPrereqError("Changing the disk template to a mirrored"
2327 " one requires specifying a secondary node",
2328 errors.ECODE_INVAL)
2329
2330 # Check NIC modifications
2331 self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
2332 self._VerifyNicModification)
2333
2334 if self.op.pnode:
2335 self.op.pnode = ExpandNodeName(self.cfg, self.op.pnode)
2336
2337 def ExpandNames(self):
2338 self._ExpandAndLockInstance()
2339 self.needed_locks[locking.LEVEL_NODEGROUP] = []
2340 # Can't even acquire node locks in shared mode as upcoming changes in
2341 # Ganeti 2.6 will start to modify the node object on disk conversion
2342 self.needed_locks[locking.LEVEL_NODE] = []
2343 self.needed_locks[locking.LEVEL_NODE_RES] = []
2344 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2345 # Lock the node group(s) to be able to look up the ipolicy
2346 self.share_locks[locking.LEVEL_NODEGROUP] = 1
2347
2348 def DeclareLocks(self, level):
2349 if level == locking.LEVEL_NODEGROUP:
2350 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
2351 # Acquire locks for the instance's nodegroups optimistically. Needs
2352 # to be verified in CheckPrereq
2353 self.needed_locks[locking.LEVEL_NODEGROUP] = \
2354 self.cfg.GetInstanceNodeGroups(self.op.instance_name)
2355 elif level == locking.LEVEL_NODE:
2356 self._LockInstancesNodes()
2357 if self.op.disk_template and self.op.remote_node:
2358 self.op.remote_node = ExpandNodeName(self.cfg, self.op.remote_node)
2359 self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node)
2360 elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
2361 # Copy node locks
2362 self.needed_locks[locking.LEVEL_NODE_RES] = \
2363 CopyLockList(self.needed_locks[locking.LEVEL_NODE])
2364
2365 def BuildHooksEnv(self):
2366 """Build hooks env.
2367
2368 This runs on the master, primary and secondaries.
2369
2370 """
2371 args = {}
2372 if constants.BE_MINMEM in self.be_new:
2373 args["minmem"] = self.be_new[constants.BE_MINMEM]
2374 if constants.BE_MAXMEM in self.be_new:
2375 args["maxmem"] = self.be_new[constants.BE_MAXMEM]
2376 if constants.BE_VCPUS in self.be_new:
2377 args["vcpus"] = self.be_new[constants.BE_VCPUS]
2378 # TODO: export disk changes. Note: BuildInstanceHookEnv* don't export disk
2379 # information at all.
2380
2381 if self._new_nics is not None:
2382 nics = []
2383
2384 for nic in self._new_nics:
2385 n = copy.deepcopy(nic)
2386 nicparams = self.cluster.SimpleFillNIC(n.nicparams)
2387 n.nicparams = nicparams
2388 nics.append(NICToTuple(self, n))
2389
2390 args["nics"] = nics
2391
2392 env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
2393 if self.op.disk_template:
2394 env["NEW_DISK_TEMPLATE"] = self.op.disk_template
2395 if self.op.runtime_mem:
2396 env["RUNTIME_MEMORY"] = self.op.runtime_mem
2397
2398 return env
2399
2400 def BuildHooksNodes(self):
2401 """Build hooks nodes.
2402
2403 """
2404 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2405 return (nl, nl)
2406
2407 def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
2408 old_params, cluster, pnode):
2409
2410 update_params_dict = dict([(key, params[key])
2411 for key in constants.NICS_PARAMETERS
2412 if key in params])
2413
2414 req_link = update_params_dict.get(constants.NIC_LINK, None)
2415 req_mode = update_params_dict.get(constants.NIC_MODE, None)
2416
2417 new_net_uuid = None
2418 new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
2419 if new_net_uuid_or_name:
2420 new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
2421 new_net_obj = self.cfg.GetNetwork(new_net_uuid)
2422
2423 if old_net_uuid:
2424 old_net_obj = self.cfg.GetNetwork(old_net_uuid)
2425
2426 if new_net_uuid:
2427 netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode)
2428 if not netparams:
2429 raise errors.OpPrereqError("No netparams found for the network"
2430 " %s, probably not connected" %
2431 new_net_obj.name, errors.ECODE_INVAL)
2432 new_params = dict(netparams)
2433 else:
2434 new_params = GetUpdatedParams(old_params, update_params_dict)
2435
2436 utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)
2437
2438 new_filled_params = cluster.SimpleFillNIC(new_params)
2439 objects.NIC.CheckParameterSyntax(new_filled_params)
2440
2441 new_mode = new_filled_params[constants.NIC_MODE]
2442 if new_mode == constants.NIC_MODE_BRIDGED:
2443 bridge = new_filled_params[constants.NIC_LINK]
2444 msg = self.rpc.call_bridges_exist(pnode, [bridge]).fail_msg
2445 if msg:
2446 msg = "Error checking bridges on node '%s': %s" % (pnode, msg)
2447 if self.op.force:
2448 self.warn.append(msg)
2449 else:
2450 raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
2451
2452 elif new_mode == constants.NIC_MODE_ROUTED:
2453 ip = params.get(constants.INIC_IP, old_ip)
2454 if ip is None:
2455 raise errors.OpPrereqError("Cannot set the NIC IP address to None"
2456 " on a routed NIC", errors.ECODE_INVAL)
2457
2458 elif new_mode == constants.NIC_MODE_OVS:
2459 # TODO: check OVS link
2460 self.LogInfo("OVS links are currently not checked for correctness")
2461
2462 if constants.INIC_MAC in params:
2463 mac = params[constants.INIC_MAC]
2464 if mac is None:
2465 raise errors.OpPrereqError("Cannot unset the NIC MAC address",
2466 errors.ECODE_INVAL)
2467 elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2468 # otherwise generate the MAC address
2469 params[constants.INIC_MAC] = \
2470 self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2471 else:
2472 # or validate/reserve the current one
2473 try:
2474 self.cfg.ReserveMAC(mac, self.proc.GetECId())
2475 except errors.ReservationError:
2476 raise errors.OpPrereqError("MAC address '%s' already in use"
2477 " in cluster" % mac,
2478 errors.ECODE_NOTUNIQUE)
2479 elif new_net_uuid != old_net_uuid:
2480
2481 def get_net_prefix(net_uuid):
2482 mac_prefix = None
2483 if net_uuid:
2484 nobj = self.cfg.GetNetwork(net_uuid)
2485 mac_prefix = nobj.mac_prefix
2486
2487 return mac_prefix
2488
2489 new_prefix = get_net_prefix(new_net_uuid)
2490 old_prefix = get_net_prefix(old_net_uuid)
2491 if old_prefix != new_prefix:
2492 params[constants.INIC_MAC] = \
2493 self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2494
2495 # if there is a change in (ip, network) tuple
2496 new_ip = params.get(constants.INIC_IP, old_ip)
2497 if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
2498 if new_ip:
2499 # if IP is pool then require a network and generate one IP
2500 if new_ip.lower() == constants.NIC_IP_POOL:
2501 if new_net_uuid:
2502 try:
2503 new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
2504 except errors.ReservationError:
2505 raise errors.OpPrereqError("Unable to get a free IP"
2506 " from the address pool",
2507 errors.ECODE_STATE)
2508 self.LogInfo("Chose IP %s from network %s",
2509 new_ip,
2510 new_net_obj.name)
2511 params[constants.INIC_IP] = new_ip
2512 else:
2513 raise errors.OpPrereqError("ip=pool, but no network found",
2514 errors.ECODE_INVAL)
2515 # Reserve the new IP in the new network, if any
2516 elif new_net_uuid:
2517 try:
2518 self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId())
2519 self.LogInfo("Reserving IP %s in network %s",
2520 new_ip, new_net_obj.name)
2521 except errors.ReservationError:
2522 raise errors.OpPrereqError("IP %s not available in network %s" %
2523 (new_ip, new_net_obj.name),
2524 errors.ECODE_NOTUNIQUE)
2525 # new network is None so check if new IP is a conflicting IP
2526 elif self.op.conflicts_check:
2527 _CheckForConflictingIp(self, new_ip, pnode)
2528
2529 # release old IP if old network is not None
2530 if old_ip and old_net_uuid:
2531 try:
2532 self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
2533 except errors.AddressPoolError:
2534 logging.warning("Release IP %s not contained in network %s",
2535 old_ip, old_net_obj.name)
2536
2537 # there are no changes in (ip, network) tuple and old network is not None
2538 elif (old_net_uuid is not None and
2539 (req_link is not None or req_mode is not None)):
2540 raise errors.OpPrereqError("Not allowed to change link or mode of"
2541 " a NIC that is connected to a network",
2542 errors.ECODE_INVAL)
2543
2544 private.params = new_params
2545 private.filled = new_filled_params
2546
2547 def _PreCheckDiskTemplate(self, pnode_info):
2548 """CheckPrereq checks related to a new disk template."""
2549 # Arguments are passed to avoid configuration lookups
2550 instance = self.instance
2551 pnode = instance.primary_node
2552 cluster = self.cluster
2553 if instance.disk_template == self.op.disk_template:
2554 raise errors.OpPrereqError("Instance already has disk template %s" %
2555 instance.disk_template, errors.ECODE_INVAL)
2556
2557 if (instance.disk_template,
2558 self.op.disk_template) not in self._DISK_CONVERSIONS:
2559 raise errors.OpPrereqError("Unsupported disk template conversion from"
2560 " %s to %s" % (instance.disk_template,
2561 self.op.disk_template),
2562 errors.ECODE_INVAL)
2563 CheckInstanceState(self, instance, INSTANCE_DOWN,
2564 msg="cannot change disk template")
2565 if self.op.disk_template in constants.DTS_INT_MIRROR:
2566 if self.op.remote_node == pnode:
2567 raise errors.OpPrereqError("Given new secondary node %s is the same"
2568 " as the primary node of the instance" %
2569 self.op.remote_node, errors.ECODE_STATE)
2570 CheckNodeOnline(self, self.op.remote_node)
2571 CheckNodeNotDrained(self, self.op.remote_node)
2572 # FIXME: here we assume that the old instance type is DT_PLAIN
2573 assert instance.disk_template == constants.DT_PLAIN
2574 disks = [{constants.IDISK_SIZE: d.size,
2575 constants.IDISK_VG: d.logical_id[0]}
2576 for d in instance.disks]
2577 required = ComputeDiskSizePerVG(self.op.disk_template, disks)
2578 CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)
2579
2580 snode_info = self.cfg.GetNodeInfo(self.op.remote_node)
2581 snode_group = self.cfg.GetNodeGroup(snode_info.group)
2582 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
2583 snode_group)
2584 CheckTargetNodeIPolicy(self, ipolicy, instance, snode_info, self.cfg,
2585 ignore=self.op.ignore_ipolicy)
2586 if pnode_info.group != snode_info.group:
2587 self.LogWarning("The primary and secondary nodes are in two"
2588 " different node groups; the disk parameters"
2589 " from the first disk's node group will be"
2590 " used")
2591
2592 if self.op.disk_template not in constants.DTS_EXCL_STORAGE:
2593 # Make sure none of the nodes require exclusive storage
2594 nodes = [pnode_info]
2595 if self.op.disk_template in constants.DTS_INT_MIRROR:
2596 assert snode_info
2597 nodes.append(snode_info)
2598 has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
2599 if compat.any(map(has_es, nodes)):
2600 errmsg = ("Cannot convert disk template from %s to %s when exclusive"
2601 " storage is enabled" % (instance.disk_template,
2602 self.op.disk_template))
2603 raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)
2604
2605 def CheckPrereq(self):
2606 """Check prerequisites.
2607
2608 This checks the requested modifications against the current instance state.
2609
2610 """
2611 assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
2612 instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2613
2614 cluster = self.cluster = self.cfg.GetClusterInfo()
2615 assert self.instance is not None, \
2616 "Cannot retrieve locked instance %s" % self.op.instance_name
2617
2618 pnode = instance.primary_node
2619
2620 self.warn = []
2621
2622 if (self.op.pnode is not None and self.op.pnode != pnode and
2623 not self.op.force):
2624 # verify that the instance is not up
2625 instance_info = self.rpc.call_instance_info(pnode, instance.name,
2626 instance.hypervisor)
2627 if instance_info.fail_msg:
2628 self.warn.append("Can't get instance runtime information: %s" %
2629 instance_info.fail_msg)
2630 elif instance_info.payload:
2631 raise errors.OpPrereqError("Instance is still running on %s" % pnode,
2632 errors.ECODE_STATE)
2633
2634 assert pnode in self.owned_locks(locking.LEVEL_NODE)
2635 nodelist = list(instance.all_nodes)
2636 pnode_info = self.cfg.GetNodeInfo(pnode)
2637 self.diskparams = self.cfg.GetInstanceDiskParams(instance)
2638
2639 #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
2640 assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
2641 group_info = self.cfg.GetNodeGroup(pnode_info.group)
2642
2643 # dictionary with instance information after the modification
2644 ispec = {}
2645
2646 # Check disk modifications. This is done here and not in CheckArguments
2647 # (as with NICs), because we need to know the instance's disk template
2648 if instance.disk_template == constants.DT_EXT:
2649 self._CheckMods("disk", self.op.disks, {},
2650 self._VerifyDiskModification)
2651 else:
2652 self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
2653 self._VerifyDiskModification)
2654
2655 # Prepare disk/NIC modifications
2656 self.diskmod = _PrepareContainerMods(self.op.disks, None)
2657 self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)
2658
2659 # Check the validity of the `provider' parameter
2660 if instance.disk_template == constants.DT_EXT:
2661 for mod in self.diskmod:
2662 ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
2663 if mod[0] == constants.DDM_ADD:
2664 if ext_provider is None:
2665 raise errors.OpPrereqError("Instance template is '%s' and parameter"
2666 " '%s' missing, during disk add" %
2667 (constants.DT_EXT,
2668 constants.IDISK_PROVIDER),
2669 errors.ECODE_NOENT)
2670 elif mod[0] == constants.DDM_MODIFY:
2671 if ext_provider:
2672 raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
2673 " modification" %
2674 constants.IDISK_PROVIDER,
2675 errors.ECODE_INVAL)
2676 else:
2677 for mod in self.diskmod:
2678 ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
2679 if ext_provider is not None:
2680 raise errors.OpPrereqError("Parameter '%s' is only valid for"
2681 " instances of type '%s'" %
2682 (constants.IDISK_PROVIDER,
2683 constants.DT_EXT),
2684 errors.ECODE_INVAL)
2685
2686 # OS change
2687 if self.op.os_name and not self.op.force:
2688 CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
2689 self.op.force_variant)
2690 instance_os = self.op.os_name
2691 else:
2692 instance_os = instance.os
2693
2694 assert not (self.op.disk_template and self.op.disks), \
2695 "Can't modify disk template and apply disk changes at the same time"
2696
2697 if self.op.disk_template:
2698 self._PreCheckDiskTemplate(pnode_info)
2699
2700 # hvparams processing
2701 if self.op.hvparams:
2702 hv_type = instance.hypervisor
2703 i_hvdict = GetUpdatedParams(instance.hvparams, self.op.hvparams)
2704 utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
2705 hv_new = cluster.SimpleFillHV(hv_type, instance.os, i_hvdict)
2706
2707 # local check
2708 hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
2709 CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
2710 self.hv_proposed = self.hv_new = hv_new # the new actual values
2711 self.hv_inst = i_hvdict # the new dict (without defaults)
2712 else:
2713 self.hv_proposed = cluster.SimpleFillHV(instance.hypervisor, instance.os,
2714 instance.hvparams)
2715 self.hv_new = self.hv_inst = {}
2716
2717 # beparams processing
2718 if self.op.beparams:
2719 i_bedict = GetUpdatedParams(instance.beparams, self.op.beparams,
2720 use_none=True)
2721 objects.UpgradeBeParams(i_bedict)
2722 utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
2723 be_new = cluster.SimpleFillBE(i_bedict)
2724 self.be_proposed = self.be_new = be_new # the new actual values
2725 self.be_inst = i_bedict # the new dict (without defaults)
2726 else:
2727 self.be_new = self.be_inst = {}
2728 self.be_proposed = cluster.SimpleFillBE(instance.beparams)
2729 be_old = cluster.FillBE(instance)
2730
2731 # CPU param validation -- checking every time a parameter is
2732 # changed to cover all cases where either CPU mask or vcpus have
2733 # changed
2734 if (constants.BE_VCPUS in self.be_proposed and
2735 constants.HV_CPU_MASK in self.hv_proposed):
2736 cpu_list = \
2737 utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
2738 # Verify mask is consistent with number of vCPUs. Can skip this
2739 # test if only 1 entry in the CPU mask, which means same mask
2740 # is applied to all vCPUs.
2741 if (len(cpu_list) > 1 and
2742 len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
2743 raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
2744 " CPU mask [%s]" %
2745 (self.be_proposed[constants.BE_VCPUS],
2746 self.hv_proposed[constants.HV_CPU_MASK]),
2747 errors.ECODE_INVAL)
2748
2749 # Only perform this test if a new CPU mask is given
2750 if constants.HV_CPU_MASK in self.hv_new:
2751 # Calculate the largest CPU number requested
2752 max_requested_cpu = max(map(max, cpu_list))
2753 # Check that all of the instance's nodes have enough physical CPUs to
2754 # satisfy the requested CPU mask
2755 _CheckNodesPhysicalCPUs(self, instance.all_nodes,
2756 max_requested_cpu + 1, instance.hypervisor)
2757
2758 # osparams processing
2759 if self.op.osparams:
2760 i_osdict = GetUpdatedParams(instance.osparams, self.op.osparams)
2761 CheckOSParams(self, True, nodelist, instance_os, i_osdict)
2762 self.os_inst = i_osdict # the new dict (without defaults)
2763 else:
2764 self.os_inst = {}
2765
2766 #TODO(dynmem): do the appropriate check involving MINMEM
2767 if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
2768 be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
2769 mem_check_list = [pnode]
2770 if be_new[constants.BE_AUTO_BALANCE]:
2771 # either we changed auto_balance to yes or it was from before
2772 mem_check_list.extend(instance.secondary_nodes)
2773 instance_info = self.rpc.call_instance_info(pnode, instance.name,
2774 instance.hypervisor)
2775 nodeinfo = self.rpc.call_node_info(mem_check_list, None,
2776 [instance.hypervisor], False)
2777 pninfo = nodeinfo[pnode]
2778 msg = pninfo.fail_msg
2779 if msg:
2780 # Assume the primary node is unreachable and go ahead
2781 self.warn.append("Can't get info from primary node %s: %s" %
2782 (pnode, msg))
2783 else:
2784 (_, _, (pnhvinfo, )) = pninfo.payload
2785 if not isinstance(pnhvinfo.get("memory_free", None), int):
2786 self.warn.append("Node data from primary node %s doesn't contain"
2787 " free memory information" % pnode)
2788 elif instance_info.fail_msg:
2789 self.warn.append("Can't get instance runtime information: %s" %
2790 instance_info.fail_msg)
2791 else:
2792 if instance_info.payload:
2793 current_mem = int(instance_info.payload["memory"])
2794 else:
2795 # Assume instance not running
2796 # (there is a slight race condition here, but it's not very
2797 # probable, and we have no other way to check)
2798 # TODO: Describe race condition
2799 current_mem = 0
2800 #TODO(dynmem): do the appropriate check involving MINMEM
2801 miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
2802 pnhvinfo["memory_free"])
2803 if miss_mem > 0:
2804 raise errors.OpPrereqError("This change will prevent the instance"
2805 " from starting, due to %d MB of memory"
2806 " missing on its primary node" %
2807 miss_mem, errors.ECODE_NORES)
2808
2809 if be_new[constants.BE_AUTO_BALANCE]:
2810 for node, nres in nodeinfo.items():
2811 if node not in instance.secondary_nodes:
2812 continue
2813 nres.Raise("Can't get info from secondary node %s" % node,
2814 prereq=True, ecode=errors.ECODE_STATE)
2815 (_, _, (nhvinfo, )) = nres.payload
2816 if not isinstance(nhvinfo.get("memory_free", None), int):
2817 raise errors.OpPrereqError("Secondary node %s didn't return free"
2818 " memory information" % node,
2819 errors.ECODE_STATE)
2820 #TODO(dynmem): do the appropriate check involving MINMEM
2821 elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
2822 raise errors.OpPrereqError("This change will prevent the instance"
2823 " from failover to its secondary node"
2824 " %s, due to not enough memory" % node,
2825 errors.ECODE_STATE)
2826
2827 if self.op.runtime_mem:
2828 remote_info = self.rpc.call_instance_info(instance.primary_node,
2829 instance.name,
2830 instance.hypervisor)
2831 remote_info.Raise("Error checking node %s" % instance.primary_node)
2832 if not remote_info.payload: # not running already
2833 raise errors.OpPrereqError("Instance %s is not running" %
2834 instance.name, errors.ECODE_STATE)
2835
2836 current_memory = remote_info.payload["memory"]
2837 if (not self.op.force and
2838 (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
2839 self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
2840 raise errors.OpPrereqError("Instance %s must have memory between %d"
2841 " and %d MB of memory unless --force is"
2842 " given" %
2843 (instance.name,
2844 self.be_proposed[constants.BE_MINMEM],
2845 self.be_proposed[constants.BE_MAXMEM]),
2846 errors.ECODE_INVAL)
2847
2848 delta = self.op.runtime_mem - current_memory
2849 if delta > 0:
2850 CheckNodeFreeMemory(self, instance.primary_node,
2851 "ballooning memory for instance %s" %
2852 instance.name, delta, instance.hypervisor)
2853
2854 if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
2855 raise errors.OpPrereqError("Disk operations not supported for"
2856 " diskless instances", errors.ECODE_INVAL)
2857
2858 def _PrepareNicCreate(_, params, private):
2859 self._PrepareNicModification(params, private, None, None,
2860 {}, cluster, pnode)
2861 return (None, None)
2862
2863 def _PrepareNicMod(_, nic, params, private):
2864 self._PrepareNicModification(params, private, nic.ip, nic.network,
2865 nic.nicparams, cluster, pnode)
2866 return None
2867
2868 def _PrepareNicRemove(_, params, __):
2869 ip = params.ip
2870 net = params.network
2871 if net is not None and ip is not None:
2872 self.cfg.ReleaseIp(net, ip, self.proc.GetECId())
2873
2874 # Verify NIC changes (operating on copy)
2875 nics = instance.nics[:]
2876 _ApplyContainerMods("NIC", nics, None, self.nicmod,
2877 _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
2878 if len(nics) > constants.MAX_NICS:
2879 raise errors.OpPrereqError("Instance has too many network interfaces"
2880 " (%d), cannot add more" % constants.MAX_NICS,
2881 errors.ECODE_STATE)
2882
2883 def _PrepareDiskMod(_, disk, params, __):
2884 disk.name = params.get(constants.IDISK_NAME, None)
2885
2886 # Verify disk changes (operating on a copy)
2887 disks = copy.deepcopy(instance.disks)
2888 _ApplyContainerMods("disk", disks, None, self.diskmod, None,
2889 _PrepareDiskMod, None)
2890 utils.ValidateDeviceNames("disk", disks)
2891 if len(disks) > constants.MAX_DISKS:
2892 raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
2893 " more" % constants.MAX_DISKS,
2894 errors.ECODE_STATE)
2895 disk_sizes = [disk.size for disk in instance.disks]
2896 disk_sizes.extend(params["size"] for (op, idx, params, private) in
2897 self.diskmod if op == constants.DDM_ADD)
2898 ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
2899 ispec[constants.ISPEC_DISK_SIZE] = disk_sizes
2900
2901 if self.op.offline is not None and self.op.offline:
2902 CheckInstanceState(self, instance, CAN_CHANGE_INSTANCE_OFFLINE,
2903 msg="can't change to offline")
2904
2905 # Pre-compute NIC changes (necessary to use result in hooks)
2906 self._nic_chgdesc = []
2907 if self.nicmod:
2908 # Operate on copies as this is still in prereq
2909 nics = [nic.Copy() for nic in instance.nics]
2910 _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
2911 self._CreateNewNic, self._ApplyNicMods, None)
2912 # Verify that NIC names are unique and valid
2913 utils.ValidateDeviceNames("NIC", nics)
2914 self._new_nics = nics
2915 ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
2916 else:
2917 self._new_nics = None
2918 ispec[constants.ISPEC_NIC_COUNT] = len(instance.nics)
2919
2920 if not self.op.ignore_ipolicy:
2921 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
2922 group_info)
2923
2924 # Fill ispec with backend parameters
2925 ispec[constants.ISPEC_SPINDLE_USE] = \
2926 self.be_new.get(constants.BE_SPINDLE_USE, None)
2927 ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
2928 None)
2929
2930 # Copy ispec to verify parameters with min/max values separately
2931 if self.op.disk_template:
2932 new_disk_template = self.op.disk_template
2933 else:
2934 new_disk_template = instance.disk_template
2935 ispec_max = ispec.copy()
2936 ispec_max[constants.ISPEC_MEM_SIZE] = \
2937 self.be_new.get(constants.BE_MAXMEM, None)
2938 res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
2939 new_disk_template)
2940 ispec_min = ispec.copy()
2941 ispec_min[constants.ISPEC_MEM_SIZE] = \
2942 self.be_new.get(constants.BE_MINMEM, None)
2943 res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
2944 new_disk_template)
2945
2946 if (res_max or res_min):
2947 # FIXME: Improve error message by including information about whether
2948 # the upper or lower limit of the parameter fails the ipolicy.
2949 msg = ("Instance allocation to group %s (%s) violates policy: %s" %
2950 (group_info, group_info.name,
2951 utils.CommaJoin(set(res_max + res_min))))
2952 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
2953
2954 def _ConvertPlainToDrbd(self, feedback_fn):
2955 """Converts an instance from plain to drbd.
2956
2957 """
2958 feedback_fn("Converting template to drbd")
2959 instance = self.instance
2960 pnode = instance.primary_node
2961 snode = self.op.remote_node
2962
2963 assert instance.disk_template == constants.DT_PLAIN
2964
2965 # create a fake disk info for GenerateDiskTemplate
2966 disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
2967 constants.IDISK_VG: d.logical_id[0],
2968 constants.IDISK_NAME: d.name}
2969 for d in instance.disks]
2970 new_disks = GenerateDiskTemplate(self, self.op.disk_template,
2971 instance.name, pnode, [snode],
2972 disk_info, None, None, 0, feedback_fn,
2973 self.diskparams)
2974 anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
2975 self.diskparams)
2976 p_excl_stor = IsExclusiveStorageEnabledNodeName(self.cfg, pnode)
2977 s_excl_stor = IsExclusiveStorageEnabledNodeName(self.cfg, snode)
2978 info = GetInstanceInfoText(instance)
2979 feedback_fn("Creating additional volumes...")
2980 # first, create the missing data and meta devices
2981 for disk in anno_disks:
2982 # unfortunately this is... not too nice
2983 CreateSingleBlockDev(self, pnode, instance, disk.children[1],
2984 info, True, p_excl_stor)
2985 for child in disk.children:
2986 CreateSingleBlockDev(self, snode, instance, child, info, True,
2987 s_excl_stor)
2988 # at this stage, all new LVs have been created, we can rename the
2989 # old ones
2990 feedback_fn("Renaming original volumes...")
2991 rename_list = [(o, n.children[0].logical_id)
2992 for (o, n) in zip(instance.disks, new_disks)]
2993 result = self.rpc.call_blockdev_rename(pnode, rename_list)
2994 result.Raise("Failed to rename original LVs")
2995
2996 feedback_fn("Initializing DRBD devices...")
2997 # all child devices are in place, we can now create the DRBD devices
2998 try:
2999 for disk in anno_disks:
3000 for (node, excl_stor) in [(pnode, p_excl_stor), (snode, s_excl_stor)]:
3001 f_create = node == pnode
3002 CreateSingleBlockDev(self, node, instance, disk, info, f_create,
3003 excl_stor)
3004 except errors.GenericError, e:
3005 feedback_fn("Initializing of DRBD devices failed;"
3006 " renaming back original volumes...")
3007 for disk in new_disks:
3008 self.cfg.SetDiskID(disk, pnode)
3009 rename_back_list = [(n.children[0], o.logical_id)
3010 for (n, o) in zip(new_disks, instance.disks)]
3011 result = self.rpc.call_blockdev_rename(pnode, rename_back_list)
3012 result.Raise("Failed to rename LVs back after error %s" % str(e))
3013 raise
3014
3015 # at this point, the instance has been modified
3016 instance.disk_template = constants.DT_DRBD8
3017 instance.disks = new_disks
3018 self.cfg.Update(instance, feedback_fn)
3019
3020 # Release node locks while waiting for sync
3021 ReleaseLocks(self, locking.LEVEL_NODE)
3022
3023 # disks are created, waiting for sync
3024 disk_abort = not WaitForSync(self, instance,
3025 oneshot=not self.op.wait_for_sync)
3026 if disk_abort:
3027 raise errors.OpExecError("There are some degraded disks for"
3028 " this instance, please cleanup manually")
3029
3030 # Node resource locks will be released by caller
3031
3032 def _ConvertDrbdToPlain(self, feedback_fn):
3033 """Converts an instance from drbd to plain.
3034
3035 """
3036 instance = self.instance
3037
3038 assert len(instance.secondary_nodes) == 1
3039 assert instance.disk_template == constants.DT_DRBD8
3040
3041 pnode = instance.primary_node
3042 snode = instance.secondary_nodes[0]
3043 feedback_fn("Converting template to plain")
3044
3045 old_disks = AnnotateDiskParams(instance, instance.disks, self.cfg)
3046 new_disks = [d.children[0] for d in instance.disks]
3047
3048 # copy over size, mode and name
3049 for parent, child in zip(old_disks, new_disks):
3050 child.size = parent.size
3051 child.mode = parent.mode
3052 child.name = parent.name
3053
3054 # this is a DRBD disk, return its port to the pool
3055 # NOTE: this must be done right before the call to cfg.Update!
3056 for disk in old_disks:
3057 tcp_port = disk.logical_id[2]
3058 self.cfg.AddTcpUdpPort(tcp_port)
3059
3060 # update instance structure
3061 instance.disks = new_disks
3062 instance.disk_template = constants.DT_PLAIN
3063 _UpdateIvNames(0, instance.disks)
3064 self.cfg.Update(instance, feedback_fn)
3065
3066 # Release locks in case removing disks takes a while
3067 ReleaseLocks(self, locking.LEVEL_NODE)
3068
3069 feedback_fn("Removing volumes on the secondary node...")
3070 for disk in old_disks:
3071 self.cfg.SetDiskID(disk, snode)
3072 msg = self.rpc.call_blockdev_remove(snode, disk).fail_msg
3073 if msg:
3074 self.LogWarning("Could not remove block device %s on node %s,"
3075 " continuing anyway: %s", disk.iv_name, snode, msg)
3076
3077 feedback_fn("Removing unneeded volumes on the primary node...")
3078 for idx, disk in enumerate(old_disks):
3079 meta = disk.children[1]
3080 self.cfg.SetDiskID(meta, pnode)
3081 msg = self.rpc.call_blockdev_remove(pnode, meta).fail_msg
3082 if msg:
3083 self.LogWarning("Could not remove metadata for disk %d on node %s,"
3084 " continuing anyway: %s", idx, pnode, msg)
3085
3086 def _CreateNewDisk(self, idx, params, _):
3087 """Creates a new disk.
3088
3089 """
3090 instance = self.instance
3091
3092 # add a new disk
3093 if instance.disk_template in constants.DTS_FILEBASED:
3094 (file_driver, file_path) = instance.disks[0].logical_id
3095 file_path = os.path.dirname(file_path)
3096 else:
3097 file_driver = file_path = None
3098
3099 disk = \
3100 GenerateDiskTemplate(self, instance.disk_template, instance.name,
3101 instance.primary_node, instance.secondary_nodes,
3102 [params], file_path, file_driver, idx,
3103 self.Log, self.diskparams)[0]
3104
3105 info = GetInstanceInfoText(instance)
3106
3107 logging.info("Creating volume %s for instance %s",
3108 disk.iv_name, instance.name)
3109 # Note: this needs to be kept in sync with CreateDisks
3110 #HARDCODE
3111 for node in instance.all_nodes:
3112 f_create = (node == instance.primary_node)
3113 try:
3114 CreateBlockDev(self, node, instance, disk, f_create, info, f_create)
3115 except errors.OpExecError, err:
3116 self.LogWarning("Failed to create volume %s (%s) on node '%s': %s",
3117 disk.iv_name, disk, node, err)
3118
3119 if self.cluster.prealloc_wipe_disks:
3120 # Wipe new disk
3121 WipeDisks(self, instance,
3122 disks=[(idx, disk, 0)])
3123
3124 return (disk, [
3125 ("disk/%d" % idx, "add:size=%s,mode=%s" % (disk.size, disk.mode)),
3126 ])
3127
3128 @staticmethod
3129 def _ModifyDisk(idx, disk, params, _):
3130 """Modifies a disk.
3131
3132 """
3133 changes = []
3134 mode = params.get(constants.IDISK_MODE, None)
3135 if mode:
3136 disk.mode = mode
3137 changes.append(("disk.mode/%d" % idx, disk.mode))
3138
3139 name = params.get(constants.IDISK_NAME, None)
3140 if name is not None:  # do not clear an existing name when IDISK_NAME is omitted
3141 disk.name = name
changes.append(("disk.name/%d" % idx, disk.name))
3142
3143 return changes
3144
3145 def _RemoveDisk(self, idx, root, _):
3146 """Removes a disk.
3147
3148 """
3149 (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
3150 for node, disk in anno_disk.ComputeNodeTree(self.instance.primary_node):
3151 self.cfg.SetDiskID(disk, node)
3152 msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
3153 if msg:
3154 self.LogWarning("Could not remove disk/%d on node '%s': %s,"
3155 " continuing anyway", idx, node, msg)
3156
3157 # if this is a DRBD disk, return its port to the pool
3158 if root.dev_type in constants.LDS_DRBD:
3159 self.cfg.AddTcpUdpPort(root.logical_id[2])
3160
3161 def _CreateNewNic(self, idx, params, private):
3162 """Creates data structure for a new network interface.
3163
3164 """
3165 mac = params[constants.INIC_MAC]
3166 ip = params.get(constants.INIC_IP, None)
3167 net = params.get(constants.INIC_NETWORK, None)
3168 name = params.get(constants.INIC_NAME, None)
3169 net_uuid = self.cfg.LookupNetwork(net)
3170 # TODO: what if private.filled is empty? Can a NIC have no nicparams at all?
3171 nicparams = private.filled
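# private.filled is assumed to hold the NIC parameters already merged with
# the cluster defaults (prepared in CheckPrereq), so the new NIC stores a
# fully filled parameter dict.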
3172 nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
3173 nicparams=nicparams)
3174 nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
3175
3176 return (nobj, [
3177 ("nic.%d" % idx,
3178 "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
3179 (mac, ip, private.filled[constants.NIC_MODE],
3180 private.filled[constants.NIC_LINK],
3181 net)),
3182 ])
3183
3184 def _ApplyNicMods(self, idx, nic, params, private):
3185 """Modifies a network interface.
3186
3187 """
3188 changes = []
3189
3190 for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
3191 if key in params:
3192 changes.append(("nic.%s/%d" % (key, idx), params[key]))
3193 setattr(nic, key, params[key])
3194
3195 new_net = params.get(constants.INIC_NETWORK, nic.network)
3196 new_net_uuid = self.cfg.LookupNetwork(new_net)
3197 if new_net_uuid != nic.network:
3198 changes.append(("nic.network/%d" % idx, new_net))
3199 nic.network = new_net_uuid
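# nic.network stores the network's UUID internally, while the change
# description above reports the user-visible network name.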
3200
3201 if private.filled:
3202 nic.nicparams = private.filled
3203
3204 for (key, val) in nic.nicparams.items():
3205 changes.append(("nic.%s/%d" % (key, idx), val))
3206
3207 return changes
3208
3209 def Exec(self, feedback_fn):
3210 """Modifies an instance.
3211
3212 Most parameters take effect only at the next restart of the instance; runtime memory changes are applied immediately.
3213
3214 """
3215 # Process here the warnings from CheckPrereq, as we don't have a
3216 # feedback_fn there.
3217 # TODO: Replace with self.LogWarning
3218 for warn in self.warn:
3219 feedback_fn("WARNING: %s" % warn)
3220
3221 assert ((self.op.disk_template is None) ^
3222 bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
3223 "Not owning any node resource locks"
3224
3225 result = []
3226 instance = self.instance
3227
3228 # New primary node
3229 if self.op.pnode:
3230 instance.primary_node = self.op.pnode
3231
3232 # runtime memory
3233 if self.op.runtime_mem:
3234 rpcres = self.rpc.call_instance_balloon_memory(instance.primary_node,
3235 instance,
3236 self.op.runtime_mem)
3237 rpcres.Raise("Cannot modify instance runtime memory")
3238 result.append(("runtime_memory", self.op.runtime_mem))
3239
3240 # Apply disk changes
3241 _ApplyContainerMods("disk", instance.disks, result, self.diskmod,
3242 self._CreateNewDisk, self._ModifyDisk,
3243 self._RemoveDisk)
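# _ApplyContainerMods is assumed to walk self.diskmod and dispatch each entry
# to the create/modify/remove callbacks above, collecting their (field, change)
# tuples into 'result'; the following _UpdateIvNames call then renumbers the
# iv_names so they stay consecutive after additions and removals.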
3244 _UpdateIvNames(0, instance.disks)
3245
3246 if self.op.disk_template:
3247 if __debug__:
3248 check_nodes = set(instance.all_nodes)
3249 if self.op.remote_node:
3250 check_nodes.add(self.op.remote_node)
3251 for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
3252 owned = self.owned_locks(level)
3253 assert not (check_nodes - owned), \
3254 ("Not owning the correct locks, owning %r, expected at least %r" %
3255 (owned, check_nodes))
3256
3257 r_shut = ShutdownInstanceDisks(self, instance)
3258 if not r_shut:
3259 raise errors.OpExecError("Cannot shut down instance disks, unable to"
3260 " proceed with disk template conversion")
3261 mode = (instance.disk_template, self.op.disk_template)
3262 try:
3263 self._DISK_CONVERSIONS[mode](self, feedback_fn)
3264 except:
3265 self.cfg.ReleaseDRBDMinors(instance.name)
3266 raise
3267 result.append(("disk_template", self.op.disk_template))
3268
3269 assert instance.disk_template == self.op.disk_template, \
3270 ("Expected disk template '%s', found '%s'" %
3271 (self.op.disk_template, instance.disk_template))
3272
3273 # Release node and resource locks if there are any (they might already have
3274 # been released during disk conversion)
3275 ReleaseLocks(self, locking.LEVEL_NODE)
3276 ReleaseLocks(self, locking.LEVEL_NODE_RES)
3277
3278 # Apply NIC changes
3279 if self._new_nics is not None:
3280 instance.nics = self._new_nics
3281 result.extend(self._nic_chgdesc)
3282
3283 # hvparams changes
3284 if self.op.hvparams:
3285 instance.hvparams = self.hv_inst
3286 for key, val in self.op.hvparams.iteritems():
3287 result.append(("hv/%s" % key, val))
3288
3289 # beparams changes
3290 if self.op.beparams:
3291 instance.beparams = self.be_inst
3292 for key, val in self.op.beparams.iteritems():
3293 result.append(("be/%s" % key, val))
3294
3295 # OS change
3296 if self.op.os_name:
3297 instance.os = self.op.os_name
3298
3299 # osparams changes
3300 if self.op.osparams:
3301 instance.osparams = self.os_inst
3302 for key, val in self.op.osparams.iteritems():
3303 result.append(("os/%s" % key, val))
3304
3305 if self.op.offline is None:
3306 # Ignore
3307 pass
3308 elif self.op.offline:
3309 # Mark instance as offline
3310 self.cfg.MarkInstanceOffline(instance.name)
3311 result.append(("admin_state", constants.ADMINST_OFFLINE))
3312 else:
3313 # Mark instance as online, but stopped
3314 self.cfg.MarkInstanceDown(instance.name)
3315 result.append(("admin_state", constants.ADMINST_DOWN))
3316
3317 self.cfg.Update(instance, feedback_fn, self.proc.GetECId())
3318
3319 assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
3320 self.owned_locks(locking.LEVEL_NODE)), \
3321 "All node locks should have been released by now"
3322
3323 return result
3324
3325 _DISK_CONVERSIONS = {
3326 (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
3327 (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
3328 }
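# Rough usage sketch (assumption, mirroring the dispatch in Exec above): a
# conversion is requested through OpInstanceSetParams, e.g. with
#   gnt-instance modify -t drbd -n <secondary-node> <instance>
# and Exec looks up the handler as
#   self._DISK_CONVERSIONS[(old_template, new_template)](self, feedback_fn)
# Only plain<->drbd conversions are supported by this table.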
3329
3330
3331 class LUInstanceChangeGroup(LogicalUnit):
3332 HPATH = "instance-change-group"
3333 HTYPE = constants.HTYPE_INSTANCE
3334 REQ_BGL = False
3335
3336 def ExpandNames(self):
3337 self.share_locks = ShareAll()
3338
3339 self.needed_locks = {
3340 locking.LEVEL_NODEGROUP: [],
3341 locking.LEVEL_NODE: [],
3342 locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
3343 }
3344
3345 self._ExpandAndLockInstance()
3346
3347 if self.op.target_groups:
3348 self.req_target_uuids = map(self.cfg.LookupNodeGroup,
3349 self.op.target_groups)
3350 else:
3351 self.req_target_uuids = None
3352
3353 self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)
3354
3355 def DeclareLocks(self, level):
3356 if level == locking.LEVEL_NODEGROUP:
3357 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
3358
3359 if self.req_target_uuids:
3360 lock_groups = set(self.req_target_uuids)
3361
3362 # Optimistically lock all groups used by the instance; this reads the node
3363 # list before the node locks are held, so it has to be verified again in CheckPrereq
3364 instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_name)
3365 lock_groups.update(instance_groups)
3366 else:
3367 # No target groups, need to lock all of them
3368 lock_groups = locking.ALL_SET
3369
3370 self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups
3371
3372 elif level == locking.LEVEL_NODE:
3373 if self.req_target_uuids:
3374 # Lock all nodes used by the instance
3375 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
3376 self._LockInstancesNodes()
3377
3378 # Lock all nodes in all potential target groups
3379 lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
3380 self.cfg.GetInstanceNodeGroups(self.op.instance_name))
3381 member_nodes = [node_name
3382 for group in lock_groups
3383 for node_name in self.cfg.GetNodeGroup(group).members]
3384 self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
3385 else:
3386 # Lock all nodes as all groups are potential targets
3387 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3388
3389 def CheckPrereq(self):
3390 owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
3391 owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
3392 owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
3393
3394 assert (self.req_target_uuids is None or
3395 owned_groups.issuperset(self.req_target_uuids))
3396 assert owned_instances == set([self.op.instance_name])
3397
3398 # Get instance information
3399 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3400
3401 # Check if node groups for locked instance are still correct
3402 assert owned_nodes.issuperset(self.instance.all_nodes), \
3403 ("Instance %s's nodes changed while we kept the lock" %
3404 self.op.instance_name)
3405
3406 inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_name,
3407 owned_groups)
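# CheckInstanceNodeGroups is assumed to both verify that the optimistically
# acquired group locks still cover the instance's nodes and to return the set
# of group UUIDs currently used by the instance.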
3408
3409 if self.req_target_uuids:
3410 # User requested specific target groups
3411 self.target_uuids = frozenset(self.req_target_uuids)
3412 else:
3413 # All groups except those used by the instance are potential targets
3414 self.target_uuids = owned_groups - inst_groups
3415
3416 conflicting_groups = self.target_uuids & inst_groups
3417 if conflicting_groups:
3418 raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
3419 " used by the instance '%s'" %
3420 (utils.CommaJoin(conflicting_groups),
3421 self.op.instance_name),