cmdlib: Extract instance operation functionality
[ganeti-github.git] / lib / cmdlib / instance.py
1 #
2 #
3
4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
5 #
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
10 #
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
15 #
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 # 02110-1301, USA.
20
21
22 """Logical units dealing with instances."""
23
24 import OpenSSL
25 import copy
26 import itertools
27 import logging
28 import operator
29 import os
30
31 from ganeti import compat
32 from ganeti import constants
33 from ganeti import errors
34 from ganeti import ht
35 from ganeti import hypervisor
36 from ganeti import locking
37 from ganeti.masterd import iallocator
38 from ganeti import masterd
39 from ganeti import netutils
40 from ganeti import objects
41 from ganeti import opcodes
42 from ganeti import pathutils
43 from ganeti import qlang
44 from ganeti import rpc
45 from ganeti import utils
46 from ganeti import query
47
48 from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, _QueryBase, \
49 ResultWithJobs
50
51 from ganeti.cmdlib.common import INSTANCE_DOWN, \
52 INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, _CheckNodeOnline, \
53 _ShareAll, _GetDefaultIAllocator, _CheckInstanceNodeGroups, \
54 _LoadNodeEvacResult, _CheckIAllocatorOrNode, _CheckParamsNotGlobal, \
55 _IsExclusiveStorageEnabledNode, _CheckHVParams, _CheckOSParams, \
56 _GetWantedInstances, _CheckInstancesNodeGroups, _AnnotateDiskParams, \
57 _GetUpdatedParams, _ExpandInstanceName, _ComputeIPolicySpecViolation, \
58 _CheckInstanceState, _ExpandNodeName
59 from ganeti.cmdlib.instance_storage import _CreateDisks, \
60 _CheckNodesFreeDiskPerVG, _WipeDisks, _WaitForSync, \
61 _IsExclusiveStorageEnabledNodeName, _CreateSingleBlockDev, _ComputeDisks, \
62 _CheckRADOSFreeSpace, _ComputeDiskSizePerVG, _GenerateDiskTemplate, \
63 _CreateBlockDev, _StartInstanceDisks, _ShutdownInstanceDisks, \
64 _AssembleInstanceDisks
65 from ganeti.cmdlib.instance_operation import _GetInstanceConsole
66 from ganeti.cmdlib.instance_utils import _BuildInstanceHookEnvByObject, \
67 _GetClusterDomainSecret, _BuildInstanceHookEnv, _NICListToTuple, \
68 _NICToTuple, _CheckNodeNotDrained, _RemoveInstance, _CopyLockList, \
69 _ReleaseLocks, _CheckNodeVmCapable, _CheckTargetNodeIPolicy, \
70 _GetInstanceInfoText, _RemoveDisks, _CheckNodeFreeMemory, \
71 _CheckInstanceBridgesExist, _CheckNicsBridgesExist, _CheckNodeHasOS
72
73 import ganeti.masterd.instance
74
75
76 #: Type description for changes as returned by L{ApplyContainerMods}'s
77 #: callbacks
78 _TApplyContModsCbChanges = \
79 ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
80 ht.TNonEmptyString,
81 ht.TAny,
82 ])))
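# Illustrative sketch, not part of the original file: a value accepted by the
# predicate above is either None or a list of (name, payload) 2-tuples, e.g.
#
#   changes = [("disk/0", "add"), ("nic/1", {"mac": "aa:bb:cc:dd:ee:ff"})]
#   assert _TApplyContModsCbChanges(changes)
#
# Only the shape (a non-empty string paired with an arbitrary value) is
# checked; the concrete names and payloads here are hypothetical.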
83
84
85 def _CheckHostnameSane(lu, name):
86 """Ensures that a given hostname resolves to a 'sane' name.
87
88 The given name is required to be a prefix of the resolved hostname,
89 to prevent accidental mismatches.
90
91 @param lu: the logical unit on behalf of which we're checking
92 @param name: the name we should resolve and check
93 @return: the resolved hostname object
94
95 """
96 hostname = netutils.GetHostname(name=name)
97 if hostname.name != name:
98 lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
99 if not utils.MatchNameComponent(name, [hostname.name]):
100 raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
101 " same as given hostname '%s'") %
102 (hostname.name, name), errors.ECODE_INVAL)
103 return hostname
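# Usage sketch (hostnames are illustrative assumptions): with a resolver that
# expands short names, _CheckHostnameSane(lu, "web1") accepts a resolution to
# "web1.example.com", because the given name matches a prefix component of the
# resolved name, while a resolution to "mail.example.com" raises OpPrereqError.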
104
105
106 def _CheckOpportunisticLocking(op):
107 """Generate error if opportunistic locking is not possible.
108
109 """
110 if op.opportunistic_locking and not op.iallocator:
111 raise errors.OpPrereqError("Opportunistic locking is only available in"
112 " combination with an instance allocator",
113 errors.ECODE_INVAL)
114
115
116 def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_whitelist):
117 """Wrapper around IAReqInstanceAlloc.
118
119 @param op: The instance opcode
120 @param disks: The computed disks
121 @param nics: The computed nics
122 @param beparams: The fully filled beparams
123 @param node_whitelist: List of nodes which should appear as online to the
124 allocator (unless the node is already marked offline)
125
126 @returns: A filled L{iallocator.IAReqInstanceAlloc}
127
128 """
129 spindle_use = beparams[constants.BE_SPINDLE_USE]
130 return iallocator.IAReqInstanceAlloc(name=op.instance_name,
131 disk_template=op.disk_template,
132 tags=op.tags,
133 os=op.os_type,
134 vcpus=beparams[constants.BE_VCPUS],
135 memory=beparams[constants.BE_MAXMEM],
136 spindle_use=spindle_use,
137 disks=disks,
138 nics=[n.ToDict() for n in nics],
139 hypervisor=op.hypervisor,
140 node_whitelist=node_whitelist)
141
142
143 def _ComputeFullBeParams(op, cluster):
144 """Computes the full beparams.
145
146 @param op: The instance opcode
147 @param cluster: The cluster config object
148
149 @return: The fully filled beparams
150
151 """
152 default_beparams = cluster.beparams[constants.PP_DEFAULT]
153 for param, value in op.beparams.iteritems():
154 if value == constants.VALUE_AUTO:
155 op.beparams[param] = default_beparams[param]
156 objects.UpgradeBeParams(op.beparams)
157 utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
158 return cluster.SimpleFillBE(op.beparams)
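# Sketch of the substitution performed above (values are hypothetical): if the
# opcode carries beparams such as {"vcpus": "auto", "maxmem": 2048}, the "auto"
# entry is first replaced by the cluster default, the dict is upgraded and
# type-checked, and SimpleFillBE then merges in the remaining cluster defaults,
# yielding a fully populated beparams dict.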
159
160
161 def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
162 """Computes the nics.
163
164 @param op: The instance opcode
165 @param cluster: Cluster configuration object
166 @param default_ip: The default ip to assign
167 @param cfg: An instance of the configuration object
168 @param ec_id: Execution context ID
169
170 @returns: The built-up NIC objects
171
172 """
173 nics = []
174 for nic in op.nics:
175 nic_mode_req = nic.get(constants.INIC_MODE, None)
176 nic_mode = nic_mode_req
177 if nic_mode is None or nic_mode == constants.VALUE_AUTO:
178 nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
179
180 net = nic.get(constants.INIC_NETWORK, None)
181 link = nic.get(constants.NIC_LINK, None)
182 ip = nic.get(constants.INIC_IP, None)
183
184 if net is None or net.lower() == constants.VALUE_NONE:
185 net = None
186 else:
187 if nic_mode_req is not None or link is not None:
188 raise errors.OpPrereqError("If network is given, no mode or link"
189 " is allowed to be passed",
190 errors.ECODE_INVAL)
191
192 # ip validity checks
193 if ip is None or ip.lower() == constants.VALUE_NONE:
194 nic_ip = None
195 elif ip.lower() == constants.VALUE_AUTO:
196 if not op.name_check:
197 raise errors.OpPrereqError("IP address set to auto but name checks"
198 " have been skipped",
199 errors.ECODE_INVAL)
200 nic_ip = default_ip
201 else:
202 # We defer pool operations until later, so that the iallocator has
203 # filled in the instance's node(s)
204 if ip.lower() == constants.NIC_IP_POOL:
205 if net is None:
206 raise errors.OpPrereqError("if ip=pool, parameter network"
207 " must be passed too",
208 errors.ECODE_INVAL)
209
210 elif not netutils.IPAddress.IsValid(ip):
211 raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
212 errors.ECODE_INVAL)
213
214 nic_ip = ip
215
216 # TODO: check the ip address for uniqueness
217 if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
218 raise errors.OpPrereqError("Routed nic mode requires an ip address",
219 errors.ECODE_INVAL)
220
221 # MAC address verification
222 mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
223 if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
224 mac = utils.NormalizeAndValidateMac(mac)
225
226 try:
227 # TODO: We need to factor this out
228 cfg.ReserveMAC(mac, ec_id)
229 except errors.ReservationError:
230 raise errors.OpPrereqError("MAC address %s already in use"
231 " in cluster" % mac,
232 errors.ECODE_NOTUNIQUE)
233
234 # Build nic parameters
235 nicparams = {}
236 if nic_mode_req:
237 nicparams[constants.NIC_MODE] = nic_mode
238 if link:
239 nicparams[constants.NIC_LINK] = link
240
241 check_params = cluster.SimpleFillNIC(nicparams)
242 objects.NIC.CheckParameterSyntax(check_params)
243 net_uuid = cfg.LookupNetwork(net)
244 name = nic.get(constants.INIC_NAME, None)
245 if name is not None and name.lower() == constants.VALUE_NONE:
246 name = None
247 nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
248 network=net_uuid, nicparams=nicparams)
249 nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
250 nics.append(nic_obj)
251
252 return nics
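# Illustrative input (an assumption about typical usage): each entry of
# op.nics is a dict such as {"mode": "bridged", "link": "br0"} or
# {"ip": "pool", "network": "vlan100"}, or simply {} to take everything from
# the cluster defaults; the loop above turns each of them into an objects.NIC
# with a reserved MAC and validated parameters.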
253
254
255 def _CheckForConflictingIp(lu, ip, node):
256 """In case of conflicting IP address raise error.
257
258 @type ip: string
259 @param ip: IP address
260 @type node: string
261 @param node: node name
262
263 """
264 (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node)
265 if conf_net is not None:
266 raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
267 " network %s, but the target NIC does not." %
268 (ip, conf_net)),
269 errors.ECODE_STATE)
270
271 return (None, None)
272
273
274 def _ComputeIPolicyInstanceSpecViolation(
275 ipolicy, instance_spec, disk_template,
276 _compute_fn=_ComputeIPolicySpecViolation):
277 """Compute if instance specs meets the specs of ipolicy.
278
279 @type ipolicy: dict
280 @param ipolicy: The ipolicy to verify against
281 @type instance_spec: dict
282 @param instance_spec: The instance spec to verify
283 @type disk_template: string
284 @param disk_template: the disk template of the instance
285 @param _compute_fn: The function to verify ipolicy (unittest only)
286 @see: L{_ComputeIPolicySpecViolation}
287
288 """
289 mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
290 cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
291 disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
292 disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
293 nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
294 spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)
295
296 return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
297 disk_sizes, spindle_use, disk_template)
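# The instance_spec argument is a (possibly partial) ispec dict keyed by the
# ISPEC_* constants; an illustrative example (key strings and values are
# assumptions):
#   {"memory-size": 2048, "cpu-count": 2, "disk-count": 1,
#    "disk-size": [10240], "nic-count": 1, "spindle-use": 1}
# Missing keys fall back to the defaults used in the .get() calls above.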
298
299
300 def _CheckOSVariant(os_obj, name):
301 """Check whether an OS name conforms to the os variants specification.
302
303 @type os_obj: L{objects.OS}
304 @param os_obj: OS object to check
305 @type name: string
306 @param name: OS name passed by the user, to check for validity
307
308 """
309 variant = objects.OS.GetVariant(name)
310 if not os_obj.supported_variants:
311 if variant:
312 raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
313 " passed)" % (os_obj.name, variant),
314 errors.ECODE_INVAL)
315 return
316 if not variant:
317 raise errors.OpPrereqError("OS name must include a variant",
318 errors.ECODE_INVAL)
319
320 if variant not in os_obj.supported_variants:
321 raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
322
323
324 class LUInstanceCreate(LogicalUnit):
325 """Create an instance.
326
327 """
328 HPATH = "instance-add"
329 HTYPE = constants.HTYPE_INSTANCE
330 REQ_BGL = False
331
332 def CheckArguments(self):
333 """Check arguments.
334
335 """
336 # do not require name_check to ease forward/backward compatibility
337 # for tools
338 if self.op.no_install and self.op.start:
339 self.LogInfo("No-installation mode selected, disabling startup")
340 self.op.start = False
341 # validate/normalize the instance name
342 self.op.instance_name = \
343 netutils.Hostname.GetNormalizedName(self.op.instance_name)
344
345 if self.op.ip_check and not self.op.name_check:
346 # TODO: make the ip check more flexible and not depend on the name check
347 raise errors.OpPrereqError("Cannot do IP address check without a name"
348 " check", errors.ECODE_INVAL)
349
350 # check nics' parameter names
351 for nic in self.op.nics:
352 utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
353 # check that NIC's parameters names are unique and valid
354 utils.ValidateDeviceNames("NIC", self.op.nics)
355
356 # check that disk's names are unique and valid
357 utils.ValidateDeviceNames("disk", self.op.disks)
358
359 cluster = self.cfg.GetClusterInfo()
360 if not self.op.disk_template in cluster.enabled_disk_templates:
361 raise errors.OpPrereqError("Cannot create an instance with disk template"
362 " '%s', because it is not enabled in the"
363 " cluster. Enabled disk templates are: %s." %
364 (self.op.disk_template,
365 ",".join(cluster.enabled_disk_templates)))
366
367 # check disks. parameter names and consistent adopt/no-adopt strategy
368 has_adopt = has_no_adopt = False
369 for disk in self.op.disks:
370 if self.op.disk_template != constants.DT_EXT:
371 utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
372 if constants.IDISK_ADOPT in disk:
373 has_adopt = True
374 else:
375 has_no_adopt = True
376 if has_adopt and has_no_adopt:
377 raise errors.OpPrereqError("Either all disks are adopted or none is",
378 errors.ECODE_INVAL)
379 if has_adopt:
380 if self.op.disk_template not in constants.DTS_MAY_ADOPT:
381 raise errors.OpPrereqError("Disk adoption is not supported for the"
382 " '%s' disk template" %
383 self.op.disk_template,
384 errors.ECODE_INVAL)
385 if self.op.iallocator is not None:
386 raise errors.OpPrereqError("Disk adoption not allowed with an"
387 " iallocator script", errors.ECODE_INVAL)
388 if self.op.mode == constants.INSTANCE_IMPORT:
389 raise errors.OpPrereqError("Disk adoption not allowed for"
390 " instance import", errors.ECODE_INVAL)
391 else:
392 if self.op.disk_template in constants.DTS_MUST_ADOPT:
393 raise errors.OpPrereqError("Disk template %s requires disk adoption,"
394 " but no 'adopt' parameter given" %
395 self.op.disk_template,
396 errors.ECODE_INVAL)
397
398 self.adopt_disks = has_adopt
399
400 # instance name verification
401 if self.op.name_check:
402 self.hostname1 = _CheckHostnameSane(self, self.op.instance_name)
403 self.op.instance_name = self.hostname1.name
404 # used in CheckPrereq for ip ping check
405 self.check_ip = self.hostname1.ip
406 else:
407 self.check_ip = None
408
409 # file storage checks
410 if (self.op.file_driver and
411 not self.op.file_driver in constants.FILE_DRIVER):
412 raise errors.OpPrereqError("Invalid file driver name '%s'" %
413 self.op.file_driver, errors.ECODE_INVAL)
414
415 if self.op.disk_template == constants.DT_FILE:
416 opcodes.RequireFileStorage()
417 elif self.op.disk_template == constants.DT_SHARED_FILE:
418 opcodes.RequireSharedFileStorage()
419
420 ### Node/iallocator related checks
421 _CheckIAllocatorOrNode(self, "iallocator", "pnode")
422
423 if self.op.pnode is not None:
424 if self.op.disk_template in constants.DTS_INT_MIRROR:
425 if self.op.snode is None:
426 raise errors.OpPrereqError("The networked disk templates need"
427 " a mirror node", errors.ECODE_INVAL)
428 elif self.op.snode:
429 self.LogWarning("Secondary node will be ignored on non-mirrored disk"
430 " template")
431 self.op.snode = None
432
433 _CheckOpportunisticLocking(self.op)
434
435 self._cds = _GetClusterDomainSecret()
436
437 if self.op.mode == constants.INSTANCE_IMPORT:
438 # On import force_variant must be True, because if we forced it at
439 # initial install, our only chance when importing it back is that it
440 # works again!
441 self.op.force_variant = True
442
443 if self.op.no_install:
444 self.LogInfo("No-installation mode has no effect during import")
445
446 elif self.op.mode == constants.INSTANCE_CREATE:
447 if self.op.os_type is None:
448 raise errors.OpPrereqError("No guest OS specified",
449 errors.ECODE_INVAL)
450 if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
451 raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
452 " installation" % self.op.os_type,
453 errors.ECODE_STATE)
454 if self.op.disk_template is None:
455 raise errors.OpPrereqError("No disk template specified",
456 errors.ECODE_INVAL)
457
458 elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
459 # Check handshake to ensure both clusters have the same domain secret
460 src_handshake = self.op.source_handshake
461 if not src_handshake:
462 raise errors.OpPrereqError("Missing source handshake",
463 errors.ECODE_INVAL)
464
465 errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
466 src_handshake)
467 if errmsg:
468 raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
469 errors.ECODE_INVAL)
470
471 # Load and check source CA
472 self.source_x509_ca_pem = self.op.source_x509_ca
473 if not self.source_x509_ca_pem:
474 raise errors.OpPrereqError("Missing source X509 CA",
475 errors.ECODE_INVAL)
476
477 try:
478 (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
479 self._cds)
480 except OpenSSL.crypto.Error, err:
481 raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
482 (err, ), errors.ECODE_INVAL)
483
484 (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
485 if errcode is not None:
486 raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
487 errors.ECODE_INVAL)
488
489 self.source_x509_ca = cert
490
491 src_instance_name = self.op.source_instance_name
492 if not src_instance_name:
493 raise errors.OpPrereqError("Missing source instance name",
494 errors.ECODE_INVAL)
495
496 self.source_instance_name = \
497 netutils.GetHostname(name=src_instance_name).name
498
499 else:
500 raise errors.OpPrereqError("Invalid instance creation mode %r" %
501 self.op.mode, errors.ECODE_INVAL)
502
503 def ExpandNames(self):
504 """ExpandNames for CreateInstance.
505
506 Figure out the right locks for instance creation.
507
508 """
509 self.needed_locks = {}
510
511 instance_name = self.op.instance_name
512 # this is just a preventive check, but someone might still add this
513 # instance in the meantime, and creation will fail at lock-add time
514 if instance_name in self.cfg.GetInstanceList():
515 raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
516 instance_name, errors.ECODE_EXISTS)
517
518 self.add_locks[locking.LEVEL_INSTANCE] = instance_name
519
520 if self.op.iallocator:
521 # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
522 # specifying a group on instance creation and then selecting nodes from
523 # that group
524 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
525 self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
526
527 if self.op.opportunistic_locking:
528 self.opportunistic_locks[locking.LEVEL_NODE] = True
529 self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
530 else:
531 self.op.pnode = _ExpandNodeName(self.cfg, self.op.pnode)
532 nodelist = [self.op.pnode]
533 if self.op.snode is not None:
534 self.op.snode = _ExpandNodeName(self.cfg, self.op.snode)
535 nodelist.append(self.op.snode)
536 self.needed_locks[locking.LEVEL_NODE] = nodelist
537
538 # in case of import lock the source node too
539 if self.op.mode == constants.INSTANCE_IMPORT:
540 src_node = self.op.src_node
541 src_path = self.op.src_path
542
543 if src_path is None:
544 self.op.src_path = src_path = self.op.instance_name
545
546 if src_node is None:
547 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
548 self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
549 self.op.src_node = None
550 if os.path.isabs(src_path):
551 raise errors.OpPrereqError("Importing an instance from a path"
552 " requires a source node option",
553 errors.ECODE_INVAL)
554 else:
555 self.op.src_node = src_node = _ExpandNodeName(self.cfg, src_node)
556 if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
557 self.needed_locks[locking.LEVEL_NODE].append(src_node)
558 if not os.path.isabs(src_path):
559 self.op.src_path = src_path = \
560 utils.PathJoin(pathutils.EXPORT_DIR, src_path)
561
562 self.needed_locks[locking.LEVEL_NODE_RES] = \
563 _CopyLockList(self.needed_locks[locking.LEVEL_NODE])
564
565 def _RunAllocator(self):
566 """Run the allocator based on input opcode.
567
568 """
569 if self.op.opportunistic_locking:
570 # Only consider nodes for which a lock is held
571 node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
572 else:
573 node_whitelist = None
574
575 #TODO Export network to iallocator so that it chooses a pnode
576 # in a nodegroup that has the desired network connected to
577 req = _CreateInstanceAllocRequest(self.op, self.disks,
578 self.nics, self.be_full,
579 node_whitelist)
580 ial = iallocator.IAllocator(self.cfg, self.rpc, req)
581
582 ial.Run(self.op.iallocator)
583
584 if not ial.success:
585 # When opportunistic locks are used only a temporary failure is generated
586 if self.op.opportunistic_locking:
587 ecode = errors.ECODE_TEMP_NORES
588 else:
589 ecode = errors.ECODE_NORES
590
591 raise errors.OpPrereqError("Can't compute nodes using"
592 " iallocator '%s': %s" %
593 (self.op.iallocator, ial.info),
594 ecode)
595
596 self.op.pnode = ial.result[0]
597 self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
598 self.op.instance_name, self.op.iallocator,
599 utils.CommaJoin(ial.result))
600
601 assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"
602
603 if req.RequiredNodes() == 2:
604 self.op.snode = ial.result[1]
605
606 def BuildHooksEnv(self):
607 """Build hooks env.
608
609 This runs on master, primary and secondary nodes of the instance.
610
611 """
612 env = {
613 "ADD_MODE": self.op.mode,
614 }
615 if self.op.mode == constants.INSTANCE_IMPORT:
616 env["SRC_NODE"] = self.op.src_node
617 env["SRC_PATH"] = self.op.src_path
618 env["SRC_IMAGES"] = self.src_images
619
620 env.update(_BuildInstanceHookEnv(
621 name=self.op.instance_name,
622 primary_node=self.op.pnode,
623 secondary_nodes=self.secondaries,
624 status=self.op.start,
625 os_type=self.op.os_type,
626 minmem=self.be_full[constants.BE_MINMEM],
627 maxmem=self.be_full[constants.BE_MAXMEM],
628 vcpus=self.be_full[constants.BE_VCPUS],
629 nics=_NICListToTuple(self, self.nics),
630 disk_template=self.op.disk_template,
631 disks=[(d[constants.IDISK_NAME], d[constants.IDISK_SIZE],
632 d[constants.IDISK_MODE]) for d in self.disks],
633 bep=self.be_full,
634 hvp=self.hv_full,
635 hypervisor_name=self.op.hypervisor,
636 tags=self.op.tags,
637 ))
638
639 return env
640
641 def BuildHooksNodes(self):
642 """Build hooks nodes.
643
644 """
645 nl = [self.cfg.GetMasterNode(), self.op.pnode] + self.secondaries
646 return nl, nl
647
648 def _ReadExportInfo(self):
649 """Reads the export information from disk.
650
651 It will override the opcode source node and path with the actual
652 information, if these two were not specified before.
653
654 @return: the export information
655
656 """
657 assert self.op.mode == constants.INSTANCE_IMPORT
658
659 src_node = self.op.src_node
660 src_path = self.op.src_path
661
662 if src_node is None:
663 locked_nodes = self.owned_locks(locking.LEVEL_NODE)
664 exp_list = self.rpc.call_export_list(locked_nodes)
665 found = False
666 for node in exp_list:
667 if exp_list[node].fail_msg:
668 continue
669 if src_path in exp_list[node].payload:
670 found = True
671 self.op.src_node = src_node = node
672 self.op.src_path = src_path = utils.PathJoin(pathutils.EXPORT_DIR,
673 src_path)
674 break
675 if not found:
676 raise errors.OpPrereqError("No export found for relative path %s" %
677 src_path, errors.ECODE_INVAL)
678
679 _CheckNodeOnline(self, src_node)
680 result = self.rpc.call_export_info(src_node, src_path)
681 result.Raise("No export or invalid export found in dir %s" % src_path)
682
683 export_info = objects.SerializableConfigParser.Loads(str(result.payload))
684 if not export_info.has_section(constants.INISECT_EXP):
685 raise errors.ProgrammerError("Corrupted export config",
686 errors.ECODE_ENVIRON)
687
688 ei_version = export_info.get(constants.INISECT_EXP, "version")
689 if (int(ei_version) != constants.EXPORT_VERSION):
690 raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
691 (ei_version, constants.EXPORT_VERSION),
692 errors.ECODE_ENVIRON)
693 return export_info
694
695 def _ReadExportParams(self, einfo):
696 """Use export parameters as defaults.
697
698 If the opcode doesn't specify (i.e. override) some instance
699 parameters, try to take them from the export information, if
700 it declares them.
701
702 """
703 self.op.os_type = einfo.get(constants.INISECT_EXP, "os")
704
705 if self.op.disk_template is None:
706 if einfo.has_option(constants.INISECT_INS, "disk_template"):
707 self.op.disk_template = einfo.get(constants.INISECT_INS,
708 "disk_template")
709 if self.op.disk_template not in constants.DISK_TEMPLATES:
710 raise errors.OpPrereqError("Disk template specified in configuration"
711 " file is not one of the allowed values:"
712 " %s" %
713 " ".join(constants.DISK_TEMPLATES),
714 errors.ECODE_INVAL)
715 else:
716 raise errors.OpPrereqError("No disk template specified and the export"
717 " is missing the disk_template information",
718 errors.ECODE_INVAL)
719
720 if not self.op.disks:
721 disks = []
722 # TODO: import the disk iv_name too
723 for idx in range(constants.MAX_DISKS):
724 if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
725 disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
726 disks.append({constants.IDISK_SIZE: disk_sz})
727 self.op.disks = disks
728 if not disks and self.op.disk_template != constants.DT_DISKLESS:
729 raise errors.OpPrereqError("No disk info specified and the export"
730 " is missing the disk information",
731 errors.ECODE_INVAL)
732
733 if not self.op.nics:
734 nics = []
735 for idx in range(constants.MAX_NICS):
736 if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
737 ndict = {}
738 for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
739 v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
740 ndict[name] = v
741 nics.append(ndict)
742 else:
743 break
744 self.op.nics = nics
745
746 if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
747 self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()
748
749 if (self.op.hypervisor is None and
750 einfo.has_option(constants.INISECT_INS, "hypervisor")):
751 self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")
752
753 if einfo.has_section(constants.INISECT_HYP):
754 # use the export parameters but do not override the ones
755 # specified by the user
756 for name, value in einfo.items(constants.INISECT_HYP):
757 if name not in self.op.hvparams:
758 self.op.hvparams[name] = value
759
760 if einfo.has_section(constants.INISECT_BEP):
761 # use the parameters, without overriding
762 for name, value in einfo.items(constants.INISECT_BEP):
763 if name not in self.op.beparams:
764 self.op.beparams[name] = value
765 # Compatibility for the old "memory" be param
766 if name == constants.BE_MEMORY:
767 if constants.BE_MAXMEM not in self.op.beparams:
768 self.op.beparams[constants.BE_MAXMEM] = value
769 if constants.BE_MINMEM not in self.op.beparams:
770 self.op.beparams[constants.BE_MINMEM] = value
771 else:
772 # try to read the parameters old style, from the main section
773 for name in constants.BES_PARAMETERS:
774 if (name not in self.op.beparams and
775 einfo.has_option(constants.INISECT_INS, name)):
776 self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)
777
778 if einfo.has_section(constants.INISECT_OSP):
779 # use the parameters, without overriding
780 for name, value in einfo.items(constants.INISECT_OSP):
781 if name not in self.op.osparams:
782 self.op.osparams[name] = value
783
784 def _RevertToDefaults(self, cluster):
785 """Revert the instance parameters to the default values.
786
787 """
788 # hvparams
789 hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
790 for name in self.op.hvparams.keys():
791 if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
792 del self.op.hvparams[name]
793 # beparams
794 be_defs = cluster.SimpleFillBE({})
795 for name in self.op.beparams.keys():
796 if name in be_defs and be_defs[name] == self.op.beparams[name]:
797 del self.op.beparams[name]
798 # nic params
799 nic_defs = cluster.SimpleFillNIC({})
800 for nic in self.op.nics:
801 for name in constants.NICS_PARAMETERS:
802 if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
803 del nic[name]
804 # osparams
805 os_defs = cluster.SimpleFillOS(self.op.os_type, {})
806 for name in self.op.osparams.keys():
807 if name in os_defs and os_defs[name] == self.op.osparams[name]:
808 del self.op.osparams[name]
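# Net effect (summary of the loops above): when op.identify_defaults is set,
# only the parameters whose values differ from the cluster defaults remain in
# op.hvparams/op.beparams/op.osparams and the per-NIC dicts, so the new
# instance keeps tracking later changes to the cluster defaults instead of
# freezing the currently effective values.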
809
810 def _CalculateFileStorageDir(self):
811 """Calculate final instance file storage dir.
812
813 """
814 # file storage dir calculation/check
815 self.instance_file_storage_dir = None
816 if self.op.disk_template in constants.DTS_FILEBASED:
817 # build the full file storage dir path
818 joinargs = []
819
820 if self.op.disk_template == constants.DT_SHARED_FILE:
821 get_fsd_fn = self.cfg.GetSharedFileStorageDir
822 else:
823 get_fsd_fn = self.cfg.GetFileStorageDir
824
825 cfg_storagedir = get_fsd_fn()
826 if not cfg_storagedir:
827 raise errors.OpPrereqError("Cluster file storage dir not defined",
828 errors.ECODE_STATE)
829 joinargs.append(cfg_storagedir)
830
831 if self.op.file_storage_dir is not None:
832 joinargs.append(self.op.file_storage_dir)
833
834 joinargs.append(self.op.instance_name)
835
836 # pylint: disable=W0142
837 self.instance_file_storage_dir = utils.PathJoin(*joinargs)
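# Resulting path sketch (directory names are hypothetical): with a cluster
# file storage dir of "/srv/ganeti/file-storage", an op.file_storage_dir of
# "web" and instance name "inst1.example.com", the computed directory becomes
# "/srv/ganeti/file-storage/web/inst1.example.com".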
838
839 def CheckPrereq(self): # pylint: disable=R0914
840 """Check prerequisites.
841
842 """
843 self._CalculateFileStorageDir()
844
845 if self.op.mode == constants.INSTANCE_IMPORT:
846 export_info = self._ReadExportInfo()
847 self._ReadExportParams(export_info)
848 self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
849 else:
850 self._old_instance_name = None
851
852 if (not self.cfg.GetVGName() and
853 self.op.disk_template not in constants.DTS_NOT_LVM):
854 raise errors.OpPrereqError("Cluster does not support lvm-based"
855 " instances", errors.ECODE_STATE)
856
857 if (self.op.hypervisor is None or
858 self.op.hypervisor == constants.VALUE_AUTO):
859 self.op.hypervisor = self.cfg.GetHypervisorType()
860
861 cluster = self.cfg.GetClusterInfo()
862 enabled_hvs = cluster.enabled_hypervisors
863 if self.op.hypervisor not in enabled_hvs:
864 raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
865 " cluster (%s)" %
866 (self.op.hypervisor, ",".join(enabled_hvs)),
867 errors.ECODE_STATE)
868
869 # Check tag validity
870 for tag in self.op.tags:
871 objects.TaggableObject.ValidateTag(tag)
872
873 # check hypervisor parameter syntax (locally)
874 utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
875 filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
876 self.op.hvparams)
877 hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
878 hv_type.CheckParameterSyntax(filled_hvp)
879 self.hv_full = filled_hvp
880 # check that we don't specify global parameters on an instance
881 _CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
882 "instance", "cluster")
883
884 # fill and remember the beparams dict
885 self.be_full = _ComputeFullBeParams(self.op, cluster)
886
887 # build os parameters
888 self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)
889
890 # now that hvp/bep are in final format, let's reset to defaults,
891 # if told to do so
892 if self.op.identify_defaults:
893 self._RevertToDefaults(cluster)
894
895 # NIC buildup
896 self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
897 self.proc.GetECId())
898
899 # disk checks/pre-build
900 default_vg = self.cfg.GetVGName()
901 self.disks = _ComputeDisks(self.op, default_vg)
902
903 if self.op.mode == constants.INSTANCE_IMPORT:
904 disk_images = []
905 for idx in range(len(self.disks)):
906 option = "disk%d_dump" % idx
907 if export_info.has_option(constants.INISECT_INS, option):
908 # FIXME: are the old os-es, disk sizes, etc. useful?
909 export_name = export_info.get(constants.INISECT_INS, option)
910 image = utils.PathJoin(self.op.src_path, export_name)
911 disk_images.append(image)
912 else:
913 disk_images.append(False)
914
915 self.src_images = disk_images
916
917 if self.op.instance_name == self._old_instance_name:
918 for idx, nic in enumerate(self.nics):
919 if nic.mac == constants.VALUE_AUTO:
920 nic_mac_ini = "nic%d_mac" % idx
921 nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
922
923 # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
924
925 # ip ping checks (we use the same ip that was resolved in ExpandNames)
926 if self.op.ip_check:
927 if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
928 raise errors.OpPrereqError("IP %s of instance %s already in use" %
929 (self.check_ip, self.op.instance_name),
930 errors.ECODE_NOTUNIQUE)
931
932 #### mac address generation
933 # By generating the MAC address here, both the allocator and the hooks get
934 # the real final MAC address rather than the 'auto' or 'generate' value.
935 # There is a race condition between the generation and the instance object
936 # creation, which means that we know the mac is valid now, but we're not
937 # sure it will be when we actually add the instance. If things go bad
938 # adding the instance will abort because of a duplicate mac, and the
939 # creation job will fail.
940 for nic in self.nics:
941 if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
942 nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())
943
944 #### allocator run
945
946 if self.op.iallocator is not None:
947 self._RunAllocator()
948
949 # Release all unneeded node locks
950 keep_locks = filter(None, [self.op.pnode, self.op.snode, self.op.src_node])
951 _ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
952 _ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
953 _ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
954
955 assert (self.owned_locks(locking.LEVEL_NODE) ==
956 self.owned_locks(locking.LEVEL_NODE_RES)), \
957 "Node locks differ from node resource locks"
958
959 #### node related checks
960
961 # check primary node
962 self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
963 assert self.pnode is not None, \
964 "Cannot retrieve locked node %s" % self.op.pnode
965 if pnode.offline:
966 raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
967 pnode.name, errors.ECODE_STATE)
968 if pnode.drained:
969 raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
970 pnode.name, errors.ECODE_STATE)
971 if not pnode.vm_capable:
972 raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
973 " '%s'" % pnode.name, errors.ECODE_STATE)
974
975 self.secondaries = []
976
977 # Fill in any IPs from IP pools. This must happen here, because we need to
978 # know the nic's primary node, as specified by the iallocator
979 for idx, nic in enumerate(self.nics):
980 net_uuid = nic.network
981 if net_uuid is not None:
982 nobj = self.cfg.GetNetwork(net_uuid)
983 netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.name)
984 if netparams is None:
985 raise errors.OpPrereqError("No netparams found for network"
986 " %s. Propably not connected to"
987 " node's %s nodegroup" %
988 (nobj.name, self.pnode.name),
989 errors.ECODE_INVAL)
990 self.LogInfo("NIC/%d inherits netparams %s" %
991 (idx, netparams.values()))
992 nic.nicparams = dict(netparams)
993 if nic.ip is not None:
994 if nic.ip.lower() == constants.NIC_IP_POOL:
995 try:
996 nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
997 except errors.ReservationError:
998 raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
999 " from the address pool" % idx,
1000 errors.ECODE_STATE)
1001 self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
1002 else:
1003 try:
1004 self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId())
1005 except errors.ReservationError:
1006 raise errors.OpPrereqError("IP address %s already in use"
1007 " or does not belong to network %s" %
1008 (nic.ip, nobj.name),
1009 errors.ECODE_NOTUNIQUE)
1010
1011 # net is None, ip None or given
1012 elif self.op.conflicts_check:
1013 _CheckForConflictingIp(self, nic.ip, self.pnode.name)
1014
1015 # mirror node verification
1016 if self.op.disk_template in constants.DTS_INT_MIRROR:
1017 if self.op.snode == pnode.name:
1018 raise errors.OpPrereqError("The secondary node cannot be the"
1019 " primary node", errors.ECODE_INVAL)
1020 _CheckNodeOnline(self, self.op.snode)
1021 _CheckNodeNotDrained(self, self.op.snode)
1022 _CheckNodeVmCapable(self, self.op.snode)
1023 self.secondaries.append(self.op.snode)
1024
1025 snode = self.cfg.GetNodeInfo(self.op.snode)
1026 if pnode.group != snode.group:
1027 self.LogWarning("The primary and secondary nodes are in two"
1028 " different node groups; the disk parameters"
1029 " from the first disk's node group will be"
1030 " used")
1031
1032 if not self.op.disk_template in constants.DTS_EXCL_STORAGE:
1033 nodes = [pnode]
1034 if self.op.disk_template in constants.DTS_INT_MIRROR:
1035 nodes.append(snode)
1036 has_es = lambda n: _IsExclusiveStorageEnabledNode(self.cfg, n)
1037 if compat.any(map(has_es, nodes)):
1038 raise errors.OpPrereqError("Disk template %s not supported with"
1039 " exclusive storage" % self.op.disk_template,
1040 errors.ECODE_STATE)
1041
1042 nodenames = [pnode.name] + self.secondaries
1043
1044 if not self.adopt_disks:
1045 if self.op.disk_template == constants.DT_RBD:
1046 # _CheckRADOSFreeSpace() is just a placeholder.
1047 # Any function that checks prerequisites can be placed here.
1048 # Check if there is enough space on the RADOS cluster.
1049 _CheckRADOSFreeSpace()
1050 elif self.op.disk_template == constants.DT_EXT:
1051 # FIXME: Function that checks prereqs if needed
1052 pass
1053 else:
1054 # Check lv size requirements, if not adopting
1055 req_sizes = _ComputeDiskSizePerVG(self.op.disk_template, self.disks)
1056 _CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)
1057
1058 elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
1059 all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
1060 disk[constants.IDISK_ADOPT])
1061 for disk in self.disks])
1062 if len(all_lvs) != len(self.disks):
1063 raise errors.OpPrereqError("Duplicate volume names given for adoption",
1064 errors.ECODE_INVAL)
1065 for lv_name in all_lvs:
1066 try:
1067 # FIXME: lv_name here is "vg/lv" need to ensure that other calls
1068 # to ReserveLV uses the same syntax
1069 self.cfg.ReserveLV(lv_name, self.proc.GetECId())
1070 except errors.ReservationError:
1071 raise errors.OpPrereqError("LV named %s used by another instance" %
1072 lv_name, errors.ECODE_NOTUNIQUE)
1073
1074 vg_names = self.rpc.call_vg_list([pnode.name])[pnode.name]
1075 vg_names.Raise("Cannot get VG information from node %s" % pnode.name)
1076
1077 node_lvs = self.rpc.call_lv_list([pnode.name],
1078 vg_names.payload.keys())[pnode.name]
1079 node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
1080 node_lvs = node_lvs.payload
1081
1082 delta = all_lvs.difference(node_lvs.keys())
1083 if delta:
1084 raise errors.OpPrereqError("Missing logical volume(s): %s" %
1085 utils.CommaJoin(delta),
1086 errors.ECODE_INVAL)
1087 online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
1088 if online_lvs:
1089 raise errors.OpPrereqError("Online logical volumes found, cannot"
1090 " adopt: %s" % utils.CommaJoin(online_lvs),
1091 errors.ECODE_STATE)
1092 # update the size of disk based on what is found
1093 for dsk in self.disks:
1094 dsk[constants.IDISK_SIZE] = \
1095 int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
1096 dsk[constants.IDISK_ADOPT])][0]))
1097
1098 elif self.op.disk_template == constants.DT_BLOCK:
1099 # Normalize and de-duplicate device paths
1100 all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
1101 for disk in self.disks])
1102 if len(all_disks) != len(self.disks):
1103 raise errors.OpPrereqError("Duplicate disk names given for adoption",
1104 errors.ECODE_INVAL)
1105 baddisks = [d for d in all_disks
1106 if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
1107 if baddisks:
1108 raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
1109 " cannot be adopted" %
1110 (utils.CommaJoin(baddisks),
1111 constants.ADOPTABLE_BLOCKDEV_ROOT),
1112 errors.ECODE_INVAL)
1113
1114 node_disks = self.rpc.call_bdev_sizes([pnode.name],
1115 list(all_disks))[pnode.name]
1116 node_disks.Raise("Cannot get block device information from node %s" %
1117 pnode.name)
1118 node_disks = node_disks.payload
1119 delta = all_disks.difference(node_disks.keys())
1120 if delta:
1121 raise errors.OpPrereqError("Missing block device(s): %s" %
1122 utils.CommaJoin(delta),
1123 errors.ECODE_INVAL)
1124 for dsk in self.disks:
1125 dsk[constants.IDISK_SIZE] = \
1126 int(float(node_disks[dsk[constants.IDISK_ADOPT]]))
1127
1128 # Verify instance specs
1129 spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
1130 ispec = {
1131 constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
1132 constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
1133 constants.ISPEC_DISK_COUNT: len(self.disks),
1134 constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
1135 for disk in self.disks],
1136 constants.ISPEC_NIC_COUNT: len(self.nics),
1137 constants.ISPEC_SPINDLE_USE: spindle_use,
1138 }
1139
1140 group_info = self.cfg.GetNodeGroup(pnode.group)
1141 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1142 res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
1143 self.op.disk_template)
1144 if not self.op.ignore_ipolicy and res:
1145 msg = ("Instance allocation to group %s (%s) violates policy: %s" %
1146 (pnode.group, group_info.name, utils.CommaJoin(res)))
1147 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
1148
1149 _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
1150
1151 _CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
1152 # check OS parameters (remotely)
1153 _CheckOSParams(self, True, nodenames, self.op.os_type, self.os_full)
1154
1155 _CheckNicsBridgesExist(self, self.nics, self.pnode.name)
1156
1157 #TODO: _CheckExtParams (remotely)
1158 # Check parameters for extstorage
1159
1160 # memory check on primary node
1161 #TODO(dynmem): use MINMEM for checking
1162 if self.op.start:
1163 _CheckNodeFreeMemory(self, self.pnode.name,
1164 "creating instance %s" % self.op.instance_name,
1165 self.be_full[constants.BE_MAXMEM],
1166 self.op.hypervisor)
1167
1168 self.dry_run_result = list(nodenames)
1169
1170 def Exec(self, feedback_fn):
1171 """Create and add the instance to the cluster.
1172
1173 """
1174 instance = self.op.instance_name
1175 pnode_name = self.pnode.name
1176
1177 assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
1178 self.owned_locks(locking.LEVEL_NODE)), \
1179 "Node locks differ from node resource locks"
1180 assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)
1181
1182 ht_kind = self.op.hypervisor
1183 if ht_kind in constants.HTS_REQ_PORT:
1184 network_port = self.cfg.AllocatePort()
1185 else:
1186 network_port = None
1187
1188 # This is ugly but we have a chicken-and-egg problem here
1189 # We can only take the group disk parameters, as the instance
1190 # has no disks yet (we are generating them right here).
1191 node = self.cfg.GetNodeInfo(pnode_name)
1192 nodegroup = self.cfg.GetNodeGroup(node.group)
1193 disks = _GenerateDiskTemplate(self,
1194 self.op.disk_template,
1195 instance, pnode_name,
1196 self.secondaries,
1197 self.disks,
1198 self.instance_file_storage_dir,
1199 self.op.file_driver,
1200 0,
1201 feedback_fn,
1202 self.cfg.GetGroupDiskParams(nodegroup))
1203
1204 iobj = objects.Instance(name=instance, os=self.op.os_type,
1205 primary_node=pnode_name,
1206 nics=self.nics, disks=disks,
1207 disk_template=self.op.disk_template,
1208 admin_state=constants.ADMINST_DOWN,
1209 network_port=network_port,
1210 beparams=self.op.beparams,
1211 hvparams=self.op.hvparams,
1212 hypervisor=self.op.hypervisor,
1213 osparams=self.op.osparams,
1214 )
1215
1216 if self.op.tags:
1217 for tag in self.op.tags:
1218 iobj.AddTag(tag)
1219
1220 if self.adopt_disks:
1221 if self.op.disk_template == constants.DT_PLAIN:
1222 # rename LVs to the newly-generated names; we need to construct
1223 # 'fake' LV disks with the old data, plus the new unique_id
1224 tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
1225 rename_to = []
1226 for t_dsk, a_dsk in zip(tmp_disks, self.disks):
1227 rename_to.append(t_dsk.logical_id)
1228 t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
1229 self.cfg.SetDiskID(t_dsk, pnode_name)
1230 result = self.rpc.call_blockdev_rename(pnode_name,
1231 zip(tmp_disks, rename_to))
1232 result.Raise("Failed to rename adoped LVs")
1233 else:
1234 feedback_fn("* creating instance disks...")
1235 try:
1236 _CreateDisks(self, iobj)
1237 except errors.OpExecError:
1238 self.LogWarning("Device creation failed")
1239 self.cfg.ReleaseDRBDMinors(instance)
1240 raise
1241
1242 feedback_fn("adding instance %s to cluster config" % instance)
1243
1244 self.cfg.AddInstance(iobj, self.proc.GetECId())
1245
1246 # Declare that we don't want to remove the instance lock anymore, as we've
1247 # added the instance to the config
1248 del self.remove_locks[locking.LEVEL_INSTANCE]
1249
1250 if self.op.mode == constants.INSTANCE_IMPORT:
1251 # Release unused nodes
1252 _ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node])
1253 else:
1254 # Release all nodes
1255 _ReleaseLocks(self, locking.LEVEL_NODE)
1256
1257 disk_abort = False
1258 if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
1259 feedback_fn("* wiping instance disks...")
1260 try:
1261 _WipeDisks(self, iobj)
1262 except errors.OpExecError, err:
1263 logging.exception("Wiping disks failed")
1264 self.LogWarning("Wiping instance disks failed (%s)", err)
1265 disk_abort = True
1266
1267 if disk_abort:
1268 # Something is already wrong with the disks, don't do anything else
1269 pass
1270 elif self.op.wait_for_sync:
1271 disk_abort = not _WaitForSync(self, iobj)
1272 elif iobj.disk_template in constants.DTS_INT_MIRROR:
1273 # make sure the disks are not degraded (still sync-ing is ok)
1274 feedback_fn("* checking mirrors status")
1275 disk_abort = not _WaitForSync(self, iobj, oneshot=True)
1276 else:
1277 disk_abort = False
1278
1279 if disk_abort:
1280 _RemoveDisks(self, iobj)
1281 self.cfg.RemoveInstance(iobj.name)
1282 # Make sure the instance lock gets removed
1283 self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
1284 raise errors.OpExecError("There are some degraded disks for"
1285 " this instance")
1286
1287 # Release all node resource locks
1288 _ReleaseLocks(self, locking.LEVEL_NODE_RES)
1289
1290 if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
1291 # we need to set the disks' ID to the primary node, since the
1292 # preceding code might or might not have done it, depending on
1293 # disk template and other options
1294 for disk in iobj.disks:
1295 self.cfg.SetDiskID(disk, pnode_name)
1296 if self.op.mode == constants.INSTANCE_CREATE:
1297 if not self.op.no_install:
1298 pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
1299 not self.op.wait_for_sync)
1300 if pause_sync:
1301 feedback_fn("* pausing disk sync to install instance OS")
1302 result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
1303 (iobj.disks,
1304 iobj), True)
1305 for idx, success in enumerate(result.payload):
1306 if not success:
1307 logging.warn("pause-sync of instance %s for disk %d failed",
1308 instance, idx)
1309
1310 feedback_fn("* running the instance OS create scripts...")
1311 # FIXME: pass debug option from opcode to backend
1312 os_add_result = \
1313 self.rpc.call_instance_os_add(pnode_name, (iobj, None), False,
1314 self.op.debug_level)
1315 if pause_sync:
1316 feedback_fn("* resuming disk sync")
1317 result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
1318 (iobj.disks,
1319 iobj), False)
1320 for idx, success in enumerate(result.payload):
1321 if not success:
1322 logging.warn("resume-sync of instance %s for disk %d failed",
1323 instance, idx)
1324
1325 os_add_result.Raise("Could not add os for instance %s"
1326 " on node %s" % (instance, pnode_name))
1327
1328 else:
1329 if self.op.mode == constants.INSTANCE_IMPORT:
1330 feedback_fn("* running the instance OS import scripts...")
1331
1332 transfers = []
1333
1334 for idx, image in enumerate(self.src_images):
1335 if not image:
1336 continue
1337
1338 # FIXME: pass debug option from opcode to backend
1339 dt = masterd.instance.DiskTransfer("disk/%s" % idx,
1340 constants.IEIO_FILE, (image, ),
1341 constants.IEIO_SCRIPT,
1342 (iobj.disks[idx], idx),
1343 None)
1344 transfers.append(dt)
1345
1346 import_result = \
1347 masterd.instance.TransferInstanceData(self, feedback_fn,
1348 self.op.src_node, pnode_name,
1349 self.pnode.secondary_ip,
1350 iobj, transfers)
1351 if not compat.all(import_result):
1352 self.LogWarning("Some disks for instance %s on node %s were not"
1353 " imported successfully" % (instance, pnode_name))
1354
1355 rename_from = self._old_instance_name
1356
1357 elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
1358 feedback_fn("* preparing remote import...")
1359 # The source cluster will stop the instance before attempting to make
1360 # a connection. In some cases stopping an instance can take a long
1361 # time, hence the shutdown timeout is added to the connection
1362 # timeout.
1363 connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
1364 self.op.source_shutdown_timeout)
1365 timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
1366
1367 assert iobj.primary_node == self.pnode.name
1368 disk_results = \
1369 masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
1370 self.source_x509_ca,
1371 self._cds, timeouts)
1372 if not compat.all(disk_results):
1373 # TODO: Should the instance still be started, even if some disks
1374 # failed to import (valid for local imports, too)?
1375 self.LogWarning("Some disks for instance %s on node %s were not"
1376 " imported successfully" % (instance, pnode_name))
1377
1378 rename_from = self.source_instance_name
1379
1380 else:
1381 # also checked in the prereq part
1382 raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
1383 % self.op.mode)
1384
1385 # Run rename script on newly imported instance
1386 assert iobj.name == instance
1387 feedback_fn("Running rename script for %s" % instance)
1388 result = self.rpc.call_instance_run_rename(pnode_name, iobj,
1389 rename_from,
1390 self.op.debug_level)
1391 if result.fail_msg:
1392 self.LogWarning("Failed to run rename script for %s on node"
1393 " %s: %s" % (instance, pnode_name, result.fail_msg))
1394
1395 assert not self.owned_locks(locking.LEVEL_NODE_RES)
1396
1397 if self.op.start:
1398 iobj.admin_state = constants.ADMINST_UP
1399 self.cfg.Update(iobj, feedback_fn)
1400 logging.info("Starting instance %s on node %s", instance, pnode_name)
1401 feedback_fn("* starting instance...")
1402 result = self.rpc.call_instance_start(pnode_name, (iobj, None, None),
1403 False, self.op.reason)
1404 result.Raise("Could not start instance")
1405
1406 return list(iobj.all_nodes)
1407
1408
1409 class LUInstanceRename(LogicalUnit):
1410 """Rename an instance.
1411
1412 """
1413 HPATH = "instance-rename"
1414 HTYPE = constants.HTYPE_INSTANCE
1415
1416 def CheckArguments(self):
1417 """Check arguments.
1418
1419 """
1420 if self.op.ip_check and not self.op.name_check:
1421 # TODO: make the ip check more flexible and not depend on the name check
1422 raise errors.OpPrereqError("IP address check requires a name check",
1423 errors.ECODE_INVAL)
1424
1425 def BuildHooksEnv(self):
1426 """Build hooks env.
1427
1428 This runs on master, primary and secondary nodes of the instance.
1429
1430 """
1431 env = _BuildInstanceHookEnvByObject(self, self.instance)
1432 env["INSTANCE_NEW_NAME"] = self.op.new_name
1433 return env
1434
1435 def BuildHooksNodes(self):
1436 """Build hooks nodes.
1437
1438 """
1439 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
1440 return (nl, nl)
1441
1442 def CheckPrereq(self):
1443 """Check prerequisites.
1444
1445 This checks that the instance is in the cluster and is not running.
1446
1447 """
1448 self.op.instance_name = _ExpandInstanceName(self.cfg,
1449 self.op.instance_name)
1450 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
1451 assert instance is not None
1452 _CheckNodeOnline(self, instance.primary_node)
1453 _CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
1454 msg="cannot rename")
1455 self.instance = instance
1456
1457 new_name = self.op.new_name
1458 if self.op.name_check:
1459 hostname = _CheckHostnameSane(self, new_name)
1460 new_name = self.op.new_name = hostname.name
1461 if (self.op.ip_check and
1462 netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
1463 raise errors.OpPrereqError("IP %s of instance %s already in use" %
1464 (hostname.ip, new_name),
1465 errors.ECODE_NOTUNIQUE)
1466
1467 instance_list = self.cfg.GetInstanceList()
1468 if new_name in instance_list and new_name != instance.name:
1469 raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
1470 new_name, errors.ECODE_EXISTS)
1471
1472 def Exec(self, feedback_fn):
1473 """Rename the instance.
1474
1475 """
1476 inst = self.instance
1477 old_name = inst.name
1478
1479 rename_file_storage = False
1480 if (inst.disk_template in constants.DTS_FILEBASED and
1481 self.op.new_name != inst.name):
1482 old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
1483 rename_file_storage = True
1484
1485 self.cfg.RenameInstance(inst.name, self.op.new_name)
1486 # Change the instance lock. This is definitely safe while we hold the BGL.
1487 # Otherwise the new lock would have to be added in acquired mode.
1488 assert self.REQ_BGL
1489 assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
1490 self.glm.remove(locking.LEVEL_INSTANCE, old_name)
1491 self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
1492
1493 # re-read the instance from the configuration after rename
1494 inst = self.cfg.GetInstanceInfo(self.op.new_name)
1495
1496 if rename_file_storage:
1497 new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
1498 result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
1499 old_file_storage_dir,
1500 new_file_storage_dir)
1501 result.Raise("Could not rename on node %s directory '%s' to '%s'"
1502 " (but the instance has been renamed in Ganeti)" %
1503 (inst.primary_node, old_file_storage_dir,
1504 new_file_storage_dir))
1505
1506 _StartInstanceDisks(self, inst, None)
1507 # update info on disks
1508 info = _GetInstanceInfoText(inst)
1509 for (idx, disk) in enumerate(inst.disks):
1510 for node in inst.all_nodes:
1511 self.cfg.SetDiskID(disk, node)
1512 result = self.rpc.call_blockdev_setinfo(node, disk, info)
1513 if result.fail_msg:
1514 self.LogWarning("Error setting info on node %s for disk %s: %s",
1515 node, idx, result.fail_msg)
1516 try:
1517 result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
1518 old_name, self.op.debug_level)
1519 msg = result.fail_msg
1520 if msg:
1521 msg = ("Could not run OS rename script for instance %s on node %s"
1522 " (but the instance has been renamed in Ganeti): %s" %
1523 (inst.name, inst.primary_node, msg))
1524 self.LogWarning(msg)
1525 finally:
1526 _ShutdownInstanceDisks(self, inst)
1527
1528 return inst.name
1529
1530
1531 class LUInstanceRemove(LogicalUnit):
1532 """Remove an instance.
1533
1534 """
1535 HPATH = "instance-remove"
1536 HTYPE = constants.HTYPE_INSTANCE
1537 REQ_BGL = False
1538
1539 def ExpandNames(self):
1540 self._ExpandAndLockInstance()
1541 self.needed_locks[locking.LEVEL_NODE] = []
1542 self.needed_locks[locking.LEVEL_NODE_RES] = []
1543 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
1544
1545 def DeclareLocks(self, level):
1546 if level == locking.LEVEL_NODE:
1547 self._LockInstancesNodes()
1548 elif level == locking.LEVEL_NODE_RES:
1549 # Copy node locks
1550 self.needed_locks[locking.LEVEL_NODE_RES] = \
1551 _CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1552
1553 def BuildHooksEnv(self):
1554 """Build hooks env.
1555
1556 This runs on master, primary and secondary nodes of the instance.
1557
1558 """
1559 env = _BuildInstanceHookEnvByObject(self, self.instance)
1560 env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
1561 return env
1562
1563 def BuildHooksNodes(self):
1564 """Build hooks nodes.
1565
1566 """
1567 nl = [self.cfg.GetMasterNode()]
1568 nl_post = list(self.instance.all_nodes) + nl
1569 return (nl, nl_post)
1570
1571 def CheckPrereq(self):
1572 """Check prerequisites.
1573
1574 This checks that the instance is in the cluster.
1575
1576 """
1577 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
1578 assert self.instance is not None, \
1579 "Cannot retrieve locked instance %s" % self.op.instance_name
1580
1581 def Exec(self, feedback_fn):
1582 """Remove the instance.
1583
1584 """
1585 instance = self.instance
1586 logging.info("Shutting down instance %s on node %s",
1587 instance.name, instance.primary_node)
1588
1589 result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
1590 self.op.shutdown_timeout,
1591 self.op.reason)
1592 msg = result.fail_msg
1593 if msg:
1594 if self.op.ignore_failures:
1595 feedback_fn("Warning: can't shutdown instance: %s" % msg)
1596 else:
1597 raise errors.OpExecError("Could not shutdown instance %s on"
1598 " node %s: %s" %
1599 (instance.name, instance.primary_node, msg))
1600
1601 assert (self.owned_locks(locking.LEVEL_NODE) ==
1602 self.owned_locks(locking.LEVEL_NODE_RES))
1603 assert not (set(instance.all_nodes) -
1604 self.owned_locks(locking.LEVEL_NODE)), \
1605 "Not owning correct locks"
1606
1607 _RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)
1608
1609
1610 class LUInstanceMove(LogicalUnit):
1611 """Move an instance by data-copying.
1612
1613 """
1614 HPATH = "instance-move"
1615 HTYPE = constants.HTYPE_INSTANCE
1616 REQ_BGL = False
1617
1618 def ExpandNames(self):
1619 self._ExpandAndLockInstance()
1620 target_node = _ExpandNodeName(self.cfg, self.op.target_node)
1621 self.op.target_node = target_node
1622 self.needed_locks[locking.LEVEL_NODE] = [target_node]
1623 self.needed_locks[locking.LEVEL_NODE_RES] = []
1624 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
1625
1626 def DeclareLocks(self, level):
1627 if level == locking.LEVEL_NODE:
1628 self._LockInstancesNodes(primary_only=True)
1629 elif level == locking.LEVEL_NODE_RES:
1630 # Copy node locks
1631 self.needed_locks[locking.LEVEL_NODE_RES] = \
1632 _CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1633
1634 def BuildHooksEnv(self):
1635 """Build hooks env.
1636
1637 This runs on master, primary and secondary nodes of the instance.
1638
1639 """
1640 env = {
1641 "TARGET_NODE": self.op.target_node,
1642 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
1643 }
1644 env.update(_BuildInstanceHookEnvByObject(self, self.instance))
1645 return env
1646
1647 def BuildHooksNodes(self):
1648 """Build hooks nodes.
1649
1650 """
1651 nl = [
1652 self.cfg.GetMasterNode(),
1653 self.instance.primary_node,
1654 self.op.target_node,
1655 ]
1656 return (nl, nl)
1657
1658 def CheckPrereq(self):
1659 """Check prerequisites.
1660
1661 This checks that the instance is in the cluster.
1662
1663 """
1664 self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
1665 assert self.instance is not None, \
1666 "Cannot retrieve locked instance %s" % self.op.instance_name
1667
1668 if instance.disk_template not in constants.DTS_COPYABLE:
1669 raise errors.OpPrereqError("Disk template %s not suitable for copying" %
1670 instance.disk_template, errors.ECODE_STATE)
1671
1672 node = self.cfg.GetNodeInfo(self.op.target_node)
1673 assert node is not None, \
1674 "Cannot retrieve locked node %s" % self.op.target_node
1675
1676 self.target_node = target_node = node.name
1677
1678 if target_node == instance.primary_node:
1679 raise errors.OpPrereqError("Instance %s is already on the node %s" %
1680 (instance.name, target_node),
1681 errors.ECODE_STATE)
1682
1683 bep = self.cfg.GetClusterInfo().FillBE(instance)
1684
1685 for idx, dsk in enumerate(instance.disks):
1686 if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
1687 raise errors.OpPrereqError("Instance disk %d has a complex layout,"
1688 " cannot copy" % idx, errors.ECODE_STATE)
1689
1690 _CheckNodeOnline(self, target_node)
1691 _CheckNodeNotDrained(self, target_node)
1692 _CheckNodeVmCapable(self, target_node)
1693 cluster = self.cfg.GetClusterInfo()
1694 group_info = self.cfg.GetNodeGroup(node.group)
1695 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1696 _CheckTargetNodeIPolicy(self, ipolicy, instance, node, self.cfg,
1697 ignore=self.op.ignore_ipolicy)
1698
1699 if instance.admin_state == constants.ADMINST_UP:
1700       # check memory requirements on the target node
1701 _CheckNodeFreeMemory(self, target_node,
1702 "failing over instance %s" %
1703 instance.name, bep[constants.BE_MAXMEM],
1704 instance.hypervisor)
1705 else:
1706       self.LogInfo("Not checking memory on the target node as the"
1707                    " instance will not be started")
1708
1709     # check bridge existence
1710 _CheckInstanceBridgesExist(self, instance, node=target_node)
1711
1712 def Exec(self, feedback_fn):
1713 """Move an instance.
1714
1715 The move is done by shutting it down on its present node, copying
1716 the data over (slow) and starting it on the new node.
1717
1718 """
1719 instance = self.instance
1720
1721 source_node = instance.primary_node
1722 target_node = self.target_node
1723
1724 self.LogInfo("Shutting down instance %s on source node %s",
1725 instance.name, source_node)
1726
1727 assert (self.owned_locks(locking.LEVEL_NODE) ==
1728 self.owned_locks(locking.LEVEL_NODE_RES))
1729
1730 result = self.rpc.call_instance_shutdown(source_node, instance,
1731 self.op.shutdown_timeout,
1732 self.op.reason)
1733 msg = result.fail_msg
1734 if msg:
1735 if self.op.ignore_consistency:
1736 self.LogWarning("Could not shutdown instance %s on node %s."
1737 " Proceeding anyway. Please make sure node"
1738 " %s is down. Error details: %s",
1739 instance.name, source_node, source_node, msg)
1740 else:
1741 raise errors.OpExecError("Could not shutdown instance %s on"
1742 " node %s: %s" %
1743 (instance.name, source_node, msg))
1744
1745 # create the target disks
1746 try:
1747 _CreateDisks(self, instance, target_node=target_node)
1748 except errors.OpExecError:
1749 self.LogWarning("Device creation failed")
1750 self.cfg.ReleaseDRBDMinors(instance.name)
1751 raise
1752
1753 cluster_name = self.cfg.GetClusterInfo().cluster_name
1754
1755 errs = []
1756 # activate, get path, copy the data over
1757 for idx, disk in enumerate(instance.disks):
1758 self.LogInfo("Copying data for disk %d", idx)
1759 result = self.rpc.call_blockdev_assemble(target_node, (disk, instance),
1760 instance.name, True, idx)
1761 if result.fail_msg:
1762 self.LogWarning("Can't assemble newly created disk %d: %s",
1763 idx, result.fail_msg)
1764 errs.append(result.fail_msg)
1765 break
1766 dev_path = result.payload
1767 result = self.rpc.call_blockdev_export(source_node, (disk, instance),
1768 target_node, dev_path,
1769 cluster_name)
1770 if result.fail_msg:
1771 self.LogWarning("Can't copy data over for disk %d: %s",
1772 idx, result.fail_msg)
1773 errs.append(result.fail_msg)
1774 break
1775
1776 if errs:
1777 self.LogWarning("Some disks failed to copy, aborting")
1778 try:
1779 _RemoveDisks(self, instance, target_node=target_node)
1780 finally:
1781 self.cfg.ReleaseDRBDMinors(instance.name)
1782 raise errors.OpExecError("Errors during disk copy: %s" %
1783 (",".join(errs),))
1784
1785 instance.primary_node = target_node
1786 self.cfg.Update(instance, feedback_fn)
1787
1788 self.LogInfo("Removing the disks on the original node")
1789 _RemoveDisks(self, instance, target_node=source_node)
1790
1791 # Only start the instance if it's marked as up
1792 if instance.admin_state == constants.ADMINST_UP:
1793 self.LogInfo("Starting instance %s on node %s",
1794 instance.name, target_node)
1795
1796 disks_ok, _ = _AssembleInstanceDisks(self, instance,
1797 ignore_secondaries=True)
1798 if not disks_ok:
1799 _ShutdownInstanceDisks(self, instance)
1800 raise errors.OpExecError("Can't activate the instance's disks")
1801
1802 result = self.rpc.call_instance_start(target_node,
1803 (instance, None, None), False,
1804 self.op.reason)
1805 msg = result.fail_msg
1806 if msg:
1807 _ShutdownInstanceDisks(self, instance)
1808 raise errors.OpExecError("Could not start instance %s on node %s: %s" %
1809 (instance.name, target_node, msg))
1810
1811
1812 class _InstanceQuery(_QueryBase):
1813 FIELDS = query.INSTANCE_FIELDS
1814
1815 def ExpandNames(self, lu):
1816 lu.needed_locks = {}
1817 lu.share_locks = _ShareAll()
1818
1819 if self.names:
1820 self.wanted = _GetWantedInstances(lu, self.names)
1821 else:
1822 self.wanted = locking.ALL_SET
1823
1824 self.do_locking = (self.use_locking and
1825 query.IQ_LIVE in self.requested_data)
1826 if self.do_locking:
1827 lu.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
1828 lu.needed_locks[locking.LEVEL_NODEGROUP] = []
1829 lu.needed_locks[locking.LEVEL_NODE] = []
1830 lu.needed_locks[locking.LEVEL_NETWORK] = []
1831 lu.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
1832
1833 self.do_grouplocks = (self.do_locking and
1834 query.IQ_NODES in self.requested_data)
1835
1836 def DeclareLocks(self, lu, level):
1837 if self.do_locking:
1838 if level == locking.LEVEL_NODEGROUP and self.do_grouplocks:
1839 assert not lu.needed_locks[locking.LEVEL_NODEGROUP]
1840
1841 # Lock all groups used by instances optimistically; this requires going
1842 # via the node before it's locked, requiring verification later on
1843 lu.needed_locks[locking.LEVEL_NODEGROUP] = \
1844 set(group_uuid
1845 for instance_name in lu.owned_locks(locking.LEVEL_INSTANCE)
1846 for group_uuid in lu.cfg.GetInstanceNodeGroups(instance_name))
1847 elif level == locking.LEVEL_NODE:
1848 lu._LockInstancesNodes() # pylint: disable=W0212
1849
1850 elif level == locking.LEVEL_NETWORK:
1851 lu.needed_locks[locking.LEVEL_NETWORK] = \
1852 frozenset(net_uuid
1853 for instance_name in lu.owned_locks(locking.LEVEL_INSTANCE)
1854 for net_uuid in lu.cfg.GetInstanceNetworks(instance_name))
1855
1856 @staticmethod
1857 def _CheckGroupLocks(lu):
1858 owned_instances = frozenset(lu.owned_locks(locking.LEVEL_INSTANCE))
1859 owned_groups = frozenset(lu.owned_locks(locking.LEVEL_NODEGROUP))
1860
1861 # Check if node groups for locked instances are still correct
1862 for instance_name in owned_instances:
1863 _CheckInstanceNodeGroups(lu.cfg, instance_name, owned_groups)
1864
1865 def _GetQueryData(self, lu):
1866 """Computes the list of instances and their attributes.
1867
1868 """
1869 if self.do_grouplocks:
1870 self._CheckGroupLocks(lu)
1871
1872 cluster = lu.cfg.GetClusterInfo()
1873 all_info = lu.cfg.GetAllInstancesInfo()
1874
1875 instance_names = self._GetNames(lu, all_info.keys(), locking.LEVEL_INSTANCE)
1876
1877 instance_list = [all_info[name] for name in instance_names]
1878 nodes = frozenset(itertools.chain(*(inst.all_nodes
1879 for inst in instance_list)))
1880 hv_list = list(set([inst.hypervisor for inst in instance_list]))
1881 bad_nodes = []
1882 offline_nodes = []
1883 wrongnode_inst = set()
1884
1885 # Gather data as requested
1886 if self.requested_data & set([query.IQ_LIVE, query.IQ_CONSOLE]):
1887 live_data = {}
1888 node_data = lu.rpc.call_all_instances_info(nodes, hv_list)
1889 for name in nodes:
1890 result = node_data[name]
1891 if result.offline:
1892 # offline nodes will be in both lists
1893 assert result.fail_msg
1894 offline_nodes.append(name)
1895 if result.fail_msg:
1896 bad_nodes.append(name)
1897 elif result.payload:
1898 for inst in result.payload:
1899 if inst in all_info:
1900 if all_info[inst].primary_node == name:
1901 live_data.update(result.payload)
1902 else:
1903 wrongnode_inst.add(inst)
1904 else:
1905 # orphan instance; we don't list it here as we don't
1906 # handle this case yet in the output of instance listing
1907 logging.warning("Orphan instance '%s' found on node %s",
1908 inst, name)
1909 # else no instance is alive
1910 else:
1911 live_data = {}
1912
1913 if query.IQ_DISKUSAGE in self.requested_data:
1914 gmi = ganeti.masterd.instance
1915 disk_usage = dict((inst.name,
1916 gmi.ComputeDiskSize(inst.disk_template,
1917 [{constants.IDISK_SIZE: disk.size}
1918 for disk in inst.disks]))
1919 for inst in instance_list)
1920 else:
1921 disk_usage = None
1922
1923 if query.IQ_CONSOLE in self.requested_data:
1924 consinfo = {}
1925 for inst in instance_list:
1926 if inst.name in live_data:
1927 # Instance is running
1928 consinfo[inst.name] = _GetInstanceConsole(cluster, inst)
1929 else:
1930 consinfo[inst.name] = None
1931 assert set(consinfo.keys()) == set(instance_names)
1932 else:
1933 consinfo = None
1934
1935 if query.IQ_NODES in self.requested_data:
1936 node_names = set(itertools.chain(*map(operator.attrgetter("all_nodes"),
1937 instance_list)))
1938 nodes = dict(lu.cfg.GetMultiNodeInfo(node_names))
1939 groups = dict((uuid, lu.cfg.GetNodeGroup(uuid))
1940 for uuid in set(map(operator.attrgetter("group"),
1941 nodes.values())))
1942 else:
1943 nodes = None
1944 groups = None
1945
1946 if query.IQ_NETWORKS in self.requested_data:
1947 net_uuids = itertools.chain(*(lu.cfg.GetInstanceNetworks(i.name)
1948 for i in instance_list))
1949 networks = dict((uuid, lu.cfg.GetNetwork(uuid)) for uuid in net_uuids)
1950 else:
1951 networks = None
1952
1953 return query.InstanceQueryData(instance_list, lu.cfg.GetClusterInfo(),
1954 disk_usage, offline_nodes, bad_nodes,
1955 live_data, wrongnode_inst, consinfo,
1956 nodes, groups, networks)
1957
1958
1959 class LUInstanceQuery(NoHooksLU):
1960 """Logical unit for querying instances.
1961
1962 """
1963 # pylint: disable=W0142
1964 REQ_BGL = False
1965
1966 def CheckArguments(self):
1967 self.iq = _InstanceQuery(qlang.MakeSimpleFilter("name", self.op.names),
1968 self.op.output_fields, self.op.use_locking)
1969
1970 def ExpandNames(self):
1971 self.iq.ExpandNames(self)
1972
1973 def DeclareLocks(self, level):
1974 self.iq.DeclareLocks(self, level)
1975
1976 def Exec(self, feedback_fn):
1977 return self.iq.OldStyleQuery(self)
1978
1979
1980 class LUInstanceQueryData(NoHooksLU):
1981 """Query runtime instance data.
1982
1983 """
1984 REQ_BGL = False
1985
1986 def ExpandNames(self):
1987 self.needed_locks = {}
1988
1989 # Use locking if requested or when non-static information is wanted
1990 if not (self.op.static or self.op.use_locking):
1991 self.LogWarning("Non-static data requested, locks need to be acquired")
1992 self.op.use_locking = True
1993
1994 if self.op.instances or not self.op.use_locking:
1995 # Expand instance names right here
1996 self.wanted_names = _GetWantedInstances(self, self.op.instances)
1997 else:
1998 # Will use acquired locks
1999 self.wanted_names = None
2000
2001 if self.op.use_locking:
2002 self.share_locks = _ShareAll()
2003
2004 if self.wanted_names is None:
2005 self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
2006 else:
2007 self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
2008
2009 self.needed_locks[locking.LEVEL_NODEGROUP] = []
2010 self.needed_locks[locking.LEVEL_NODE] = []
2011 self.needed_locks[locking.LEVEL_NETWORK] = []
2012 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2013
2014 def DeclareLocks(self, level):
2015 if self.op.use_locking:
2016 owned_instances = self.owned_locks(locking.LEVEL_INSTANCE)
2017 if level == locking.LEVEL_NODEGROUP:
2018
2019 # Lock all groups used by instances optimistically; this requires going
2020 # via the node before it's locked, requiring verification later on
2021 self.needed_locks[locking.LEVEL_NODEGROUP] = \
2022 frozenset(group_uuid
2023 for instance_name in owned_instances
2024 for group_uuid in
2025 self.cfg.GetInstanceNodeGroups(instance_name))
2026
2027 elif level == locking.LEVEL_NODE:
2028 self._LockInstancesNodes()
2029
2030 elif level == locking.LEVEL_NETWORK:
2031 self.needed_locks[locking.LEVEL_NETWORK] = \
2032 frozenset(net_uuid
2033 for instance_name in owned_instances
2034 for net_uuid in
2035 self.cfg.GetInstanceNetworks(instance_name))
2036
2037 def CheckPrereq(self):
2038 """Check prerequisites.
2039
2040 This only checks the optional instance list against the existing names.
2041
2042 """
2043 owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
2044 owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
2045 owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
2046 owned_networks = frozenset(self.owned_locks(locking.LEVEL_NETWORK))
2047
2048 if self.wanted_names is None:
2049 assert self.op.use_locking, "Locking was not used"
2050 self.wanted_names = owned_instances
2051
2052 instances = dict(self.cfg.GetMultiInstanceInfo(self.wanted_names))
2053
2054 if self.op.use_locking:
2055 _CheckInstancesNodeGroups(self.cfg, instances, owned_groups, owned_nodes,
2056 None)
2057 else:
2058 assert not (owned_instances or owned_groups or
2059 owned_nodes or owned_networks)
2060
2061 self.wanted_instances = instances.values()
2062
2063 def _ComputeBlockdevStatus(self, node, instance, dev):
2064 """Returns the status of a block device
2065
2066 """
2067 if self.op.static or not node:
2068 return None
2069
2070 self.cfg.SetDiskID(dev, node)
2071
2072 result = self.rpc.call_blockdev_find(node, dev)
2073 if result.offline:
2074 return None
2075
2076 result.Raise("Can't compute disk status for %s" % instance.name)
2077
2078 status = result.payload
2079 if status is None:
2080 return None
2081
2082 return (status.dev_path, status.major, status.minor,
2083 status.sync_percent, status.estimated_time,
2084 status.is_degraded, status.ldisk_status)
2085
2086 def _ComputeDiskStatus(self, instance, snode, dev):
2087 """Compute block device status.
2088
2089 """
2090 (anno_dev,) = _AnnotateDiskParams(instance, [dev], self.cfg)
2091
2092 return self._ComputeDiskStatusInner(instance, snode, anno_dev)
2093
2094 def _ComputeDiskStatusInner(self, instance, snode, dev):
2095 """Compute block device status.
2096
2097 @attention: The device has to be annotated already.
2098
2099 """
2100 if dev.dev_type in constants.LDS_DRBD:
2101 # we change the snode then (otherwise we use the one passed in)
2102 if dev.logical_id[0] == instance.primary_node:
2103 snode = dev.logical_id[1]
2104 else:
2105 snode = dev.logical_id[0]
2106
2107 dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
2108 instance, dev)
2109 dev_sstatus = self._ComputeBlockdevStatus(snode, instance, dev)
2110
2111 if dev.children:
2112 dev_children = map(compat.partial(self._ComputeDiskStatusInner,
2113 instance, snode),
2114 dev.children)
2115 else:
2116 dev_children = []
2117
2118 return {
2119 "iv_name": dev.iv_name,
2120 "dev_type": dev.dev_type,
2121 "logical_id": dev.logical_id,
2122 "physical_id": dev.physical_id,
2123 "pstatus": dev_pstatus,
2124 "sstatus": dev_sstatus,
2125 "children": dev_children,
2126 "mode": dev.mode,
2127 "size": dev.size,
2128 "name": dev.name,
2129 "uuid": dev.uuid,
2130 }
2131
2132 def Exec(self, feedback_fn):
2133 """Gather and return data"""
2134 result = {}
2135
2136 cluster = self.cfg.GetClusterInfo()
2137
2138 node_names = itertools.chain(*(i.all_nodes for i in self.wanted_instances))
2139 nodes = dict(self.cfg.GetMultiNodeInfo(node_names))
2140
2141 groups = dict(self.cfg.GetMultiNodeGroupInfo(node.group
2142 for node in nodes.values()))
2143
2144 group2name_fn = lambda uuid: groups[uuid].name
2145 for instance in self.wanted_instances:
2146 pnode = nodes[instance.primary_node]
2147
2148 if self.op.static or pnode.offline:
2149 remote_state = None
2150 if pnode.offline:
2151 self.LogWarning("Primary node %s is marked offline, returning static"
2152 " information only for instance %s" %
2153 (pnode.name, instance.name))
2154 else:
2155 remote_info = self.rpc.call_instance_info(instance.primary_node,
2156 instance.name,
2157 instance.hypervisor)
2158 remote_info.Raise("Error checking node %s" % instance.primary_node)
2159 remote_info = remote_info.payload
2160 if remote_info and "state" in remote_info:
2161 remote_state = "up"
2162 else:
2163 if instance.admin_state == constants.ADMINST_UP:
2164 remote_state = "down"
2165 else:
2166 remote_state = instance.admin_state
2167
2168 disks = map(compat.partial(self._ComputeDiskStatus, instance, None),
2169 instance.disks)
2170
2171 snodes_group_uuids = [nodes[snode_name].group
2172 for snode_name in instance.secondary_nodes]
2173
2174 result[instance.name] = {
2175 "name": instance.name,
2176 "config_state": instance.admin_state,
2177 "run_state": remote_state,
2178 "pnode": instance.primary_node,
2179 "pnode_group_uuid": pnode.group,
2180 "pnode_group_name": group2name_fn(pnode.group),
2181 "snodes": instance.secondary_nodes,
2182 "snodes_group_uuids": snodes_group_uuids,
2183 "snodes_group_names": map(group2name_fn, snodes_group_uuids),
2184 "os": instance.os,
2185 # this happens to be the same format used for hooks
2186 "nics": _NICListToTuple(self, instance.nics),
2187 "disk_template": instance.disk_template,
2188 "disks": disks,
2189 "hypervisor": instance.hypervisor,
2190 "network_port": instance.network_port,
2191 "hv_instance": instance.hvparams,
2192 "hv_actual": cluster.FillHV(instance, skip_globals=True),
2193 "be_instance": instance.beparams,
2194 "be_actual": cluster.FillBE(instance),
2195 "os_instance": instance.osparams,
2196 "os_actual": cluster.SimpleFillOS(instance.os, instance.osparams),
2197 "serial_no": instance.serial_no,
2198 "mtime": instance.mtime,
2199 "ctime": instance.ctime,
2200 "uuid": instance.uuid,
2201 }
2202
2203 return result
2204
2205
2206 class LUInstanceMultiAlloc(NoHooksLU):
2207 """Allocates multiple instances at the same time.
2208
2209 """
2210 REQ_BGL = False
2211
2212 def CheckArguments(self):
2213 """Check arguments.
2214
2215 """
2216 nodes = []
2217 for inst in self.op.instances:
2218 if inst.iallocator is not None:
2219         raise errors.OpPrereqError("iallocator is not allowed to be set on"
2220 " instance objects", errors.ECODE_INVAL)
2221 nodes.append(bool(inst.pnode))
2222 if inst.disk_template in constants.DTS_INT_MIRROR:
2223 nodes.append(bool(inst.snode))
2224
2225 has_nodes = compat.any(nodes)
2226 if compat.all(nodes) ^ has_nodes:
2227       raise errors.OpPrereqError("Some instance objects provide a"
2228                                  " pnode/snode while others do not",
2229 errors.ECODE_INVAL)
2230
2231 if self.op.iallocator is None:
2232 default_iallocator = self.cfg.GetDefaultIAllocator()
2233 if default_iallocator and has_nodes:
2234 self.op.iallocator = default_iallocator
2235 else:
2236 raise errors.OpPrereqError("No iallocator or nodes on the instances"
2237 " given and no cluster-wide default"
2238 " iallocator found; please specify either"
2239 " an iallocator or nodes on the instances"
2240 " or set a cluster-wide default iallocator",
2241 errors.ECODE_INVAL)
2242
2243 _CheckOpportunisticLocking(self.op)
2244
2245 dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
2246 if dups:
2247 raise errors.OpPrereqError("There are duplicate instance names: %s" %
2248 utils.CommaJoin(dups), errors.ECODE_INVAL)
2249
2250 def ExpandNames(self):
2251 """Calculate the locks.
2252
2253 """
2254 self.share_locks = _ShareAll()
2255 self.needed_locks = {
2256 # iallocator will select nodes and even if no iallocator is used,
2257 # collisions with LUInstanceCreate should be avoided
2258 locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
2259 }
2260
2261 if self.op.iallocator:
2262 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
2263 self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
2264
2265 if self.op.opportunistic_locking:
2266 self.opportunistic_locks[locking.LEVEL_NODE] = True
2267 self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
2268 else:
2269 nodeslist = []
2270 for inst in self.op.instances:
2271 inst.pnode = _ExpandNodeName(self.cfg, inst.pnode)
2272 nodeslist.append(inst.pnode)
2273 if inst.snode is not None:
2274 inst.snode = _ExpandNodeName(self.cfg, inst.snode)
2275 nodeslist.append(inst.snode)
2276
2277 self.needed_locks[locking.LEVEL_NODE] = nodeslist
2278 # Lock resources of instance's primary and secondary nodes (copy to
2279       # prevent accidental modification)
2280 self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)
2281
2282 def CheckPrereq(self):
2283     """Check prerequisites.
2284
2285 """
2286 cluster = self.cfg.GetClusterInfo()
2287 default_vg = self.cfg.GetVGName()
2288 ec_id = self.proc.GetECId()
2289
2290 if self.op.opportunistic_locking:
2291 # Only consider nodes for which a lock is held
2292 node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
2293 else:
2294 node_whitelist = None
2295
2296 insts = [_CreateInstanceAllocRequest(op, _ComputeDisks(op, default_vg),
2297 _ComputeNics(op, cluster, None,
2298 self.cfg, ec_id),
2299 _ComputeFullBeParams(op, cluster),
2300 node_whitelist)
2301 for op in self.op.instances]
2302
2303 req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
2304 ial = iallocator.IAllocator(self.cfg, self.rpc, req)
2305
2306 ial.Run(self.op.iallocator)
2307
2308 if not ial.success:
2309 raise errors.OpPrereqError("Can't compute nodes using"
2310 " iallocator '%s': %s" %
2311 (self.op.iallocator, ial.info),
2312 errors.ECODE_NORES)
2313
2314 self.ia_result = ial.result
2315
2316 if self.op.dry_run:
2317 self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
2318 constants.JOB_IDS_KEY: [],
2319 })
2320
2321 def _ConstructPartialResult(self):
2322     """Constructs the partial result.
2323
2324 """
2325 (allocatable, failed) = self.ia_result
2326 return {
2327 opcodes.OpInstanceMultiAlloc.ALLOCATABLE_KEY:
2328 map(compat.fst, allocatable),
2329 opcodes.OpInstanceMultiAlloc.FAILED_KEY: failed,
2330 }
2331
2332 def Exec(self, feedback_fn):
2333 """Executes the opcode.
2334
2335 """
2336 op2inst = dict((op.instance_name, op) for op in self.op.instances)
2337 (allocatable, failed) = self.ia_result
2338
2339 jobs = []
2340 for (name, nodes) in allocatable:
2341 op = op2inst.pop(name)
2342
2343 if len(nodes) > 1:
2344 (op.pnode, op.snode) = nodes
2345 else:
2346 (op.pnode,) = nodes
2347
2348 jobs.append([op])
2349
2350 missing = set(op2inst.keys()) - set(failed)
2351 assert not missing, \
2352 "Iallocator did return incomplete result: %s" % utils.CommaJoin(missing)
2353
2354 return ResultWithJobs(jobs, **self._ConstructPartialResult())
2355
2356
2357 class _InstNicModPrivate:
2358 """Data structure for network interface modifications.
2359
2360 Used by L{LUInstanceSetParams}.
2361
2362 """
2363 def __init__(self):
2364 self.params = None
2365 self.filled = None
2366
2367
2368 def PrepareContainerMods(mods, private_fn):
2369 """Prepares a list of container modifications by adding a private data field.
2370
2371 @type mods: list of tuples; (operation, index, parameters)
2372 @param mods: List of modifications
2373 @type private_fn: callable or None
2374 @param private_fn: Callable for constructing a private data field for a
2375 modification
2376 @rtype: list
2377
2378 """
2379 if private_fn is None:
2380 fn = lambda: None
2381 else:
2382 fn = private_fn
2383
2384 return [(op, idx, params, fn()) for (op, idx, params) in mods]
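
# Illustrative sketch (not part of the upstream code): PrepareContainerMods
# pairs each (op, index, params) modification with its own private data
# object, mirroring how LUInstanceSetParams.CheckPrereq calls it:
#
#   diskmod = PrepareContainerMods(self.op.disks, None)
#   nicmod = PrepareContainerMods(self.op.nics, _InstNicModPrivate)
#
# Each resulting tuple is (op, index, params, private), where private is None
# for disks and a fresh _InstNicModPrivate instance for every NIC.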
2385
2386
2387 def _CheckNodesPhysicalCPUs(lu, nodenames, requested, hypervisor_name):
2388 """Checks if nodes have enough physical CPUs
2389
2390 This function checks if all given nodes have the needed number of
2391 physical CPUs. In case any node has less CPUs or we cannot get the
2392 information from the node, this function raises an OpPrereqError
2393 exception.
2394
2395 @type lu: C{LogicalUnit}
2396 @param lu: a logical unit from which we get configuration data
2397 @type nodenames: C{list}
2398 @param nodenames: the list of node names to check
2399 @type requested: C{int}
2400 @param requested: the minimum acceptable number of physical CPUs
2401 @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
2402 or we cannot check the node
2403
2404 """
2405 nodeinfo = lu.rpc.call_node_info(nodenames, None, [hypervisor_name], None)
2406 for node in nodenames:
2407 info = nodeinfo[node]
2408 info.Raise("Cannot get current information from node %s" % node,
2409 prereq=True, ecode=errors.ECODE_ENVIRON)
2410 (_, _, (hv_info, )) = info.payload
2411 num_cpus = hv_info.get("cpu_total", None)
2412 if not isinstance(num_cpus, int):
2413 raise errors.OpPrereqError("Can't compute the number of physical CPUs"
2414 " on node %s, result was '%s'" %
2415 (node, num_cpus), errors.ECODE_ENVIRON)
2416 if requested > num_cpus:
2417 raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
2418 "required" % (node, num_cpus, requested),
2419 errors.ECODE_NORES)
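
# Illustrative sketch (not part of the upstream code): the CPU-mask check in
# LUInstanceSetParams.CheckPrereq derives the highest CPU referenced by the
# proposed mask and requires one more physical CPU than that on every node:
#
#   cpu_list = utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
#   max_requested_cpu = max(map(max, cpu_list))
#   _CheckNodesPhysicalCPUs(self, instance.all_nodes,
#                           max_requested_cpu + 1, instance.hypervisor)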
2420
2421
2422 def GetItemFromContainer(identifier, kind, container):
2423   """Return the item referred to by the identifier.
2424
2425 @type identifier: string
2426 @param identifier: Item index or name or UUID
2427 @type kind: string
2428 @param kind: One-word item description
2429 @type container: list
2430 @param container: Container to get the item from
2431
2432 """
2433 # Index
2434 try:
2435 idx = int(identifier)
2436 if idx == -1:
2437       # -1 denotes the last item
2438 absidx = len(container) - 1
2439 elif idx < 0:
2440 raise IndexError("Not accepting negative indices other than -1")
2441 elif idx > len(container):
2442 raise IndexError("Got %s index %s, but there are only %s" %
2443 (kind, idx, len(container)))
2444 else:
2445 absidx = idx
2446 return (absidx, container[idx])
2447 except ValueError:
2448 pass
2449
2450 for idx, item in enumerate(container):
2451 if item.uuid == identifier or item.name == identifier:
2452 return (idx, item)
2453
2454 raise errors.OpPrereqError("Cannot find %s with identifier %s" %
2455 (kind, identifier), errors.ECODE_NOENT)
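
# Illustrative sketch (not part of the upstream code): the identifier may be a
# numeric index (with "-1" meaning the last item), an item name or an item
# UUID.  The identifiers below are hypothetical:
#
#   (idx, nic) = GetItemFromContainer("0", "NIC", instance.nics)
#   (idx, nic) = GetItemFromContainer("nic-to-internet", "NIC", instance.nics)
#   (idx, disk) = GetItemFromContainer("-1", "disk", instance.disks)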
2456
2457
2458 def ApplyContainerMods(kind, container, chgdesc, mods,
2459 create_fn, modify_fn, remove_fn):
2460 """Applies descriptions in C{mods} to C{container}.
2461
2462 @type kind: string
2463 @param kind: One-word item description
2464 @type container: list
2465 @param container: Container to modify
2466 @type chgdesc: None or list
2467 @param chgdesc: List of applied changes
2468 @type mods: list
2469 @param mods: Modifications as returned by L{PrepareContainerMods}
2470 @type create_fn: callable
2471 @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
2472 receives absolute item index, parameters and private data object as added
2473 by L{PrepareContainerMods}, returns tuple containing new item and changes
2474 as list
2475 @type modify_fn: callable
2476 @param modify_fn: Callback for modifying an existing item
2477 (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
2478 and private data object as added by L{PrepareContainerMods}, returns
2479 changes as list
2480 @type remove_fn: callable
2481 @param remove_fn: Callback on removing item; receives absolute item index,
2482 item and private data object as added by L{PrepareContainerMods}
2483
2484 """
2485 for (op, identifier, params, private) in mods:
2486 changes = None
2487
2488 if op == constants.DDM_ADD:
2489 # Calculate where item will be added
2490 # When adding an item, identifier can only be an index
2491 try:
2492 idx = int(identifier)
2493 except ValueError:
2494         raise errors.OpPrereqError("Only a positive integer or -1 is accepted"
2495                                    " as identifier for %s" % constants.DDM_ADD,
2496 errors.ECODE_INVAL)
2497 if idx == -1:
2498 addidx = len(container)
2499 else:
2500 if idx < 0:
2501 raise IndexError("Not accepting negative indices other than -1")
2502 elif idx > len(container):
2503 raise IndexError("Got %s index %s, but there are only %s" %
2504 (kind, idx, len(container)))
2505 addidx = idx
2506
2507 if create_fn is None:
2508 item = params
2509 else:
2510 (item, changes) = create_fn(addidx, params, private)
2511
2512 if idx == -1:
2513 container.append(item)
2514 else:
2515 assert idx >= 0
2516 assert idx <= len(container)
2517 # list.insert does so before the specified index
2518 container.insert(idx, item)
2519 else:
2520 # Retrieve existing item
2521 (absidx, item) = GetItemFromContainer(identifier, kind, container)
2522
2523 if op == constants.DDM_REMOVE:
2524 assert not params
2525
2526 if remove_fn is not None:
2527 remove_fn(absidx, item, private)
2528
2529 changes = [("%s/%s" % (kind, absidx), "remove")]
2530
2531 assert container[absidx] == item
2532 del container[absidx]
2533 elif op == constants.DDM_MODIFY:
2534 if modify_fn is not None:
2535 changes = modify_fn(absidx, item, params, private)
2536 else:
2537 raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2538
2539 assert _TApplyContModsCbChanges(changes)
2540
2541 if not (chgdesc is None or changes is None):
2542 chgdesc.extend(changes)
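
# Illustrative sketch (not part of the upstream code): minimal create/modify
# callbacks for ApplyContainerMods; LUInstanceSetParams uses far richer
# versions.  The callback names and parameter values below are hypothetical.
#
#   def _create(idx, params, private):
#     nic = objects.NIC(mac=params.get(constants.INIC_MAC))
#     return (nic, [("nic/%d" % idx, "add")])
#
#   def _modify(idx, nic, params, private):
#     nic.mac = params.get(constants.INIC_MAC, nic.mac)
#     return [("nic.mac/%d" % idx, nic.mac)]
#
#   changes = []
#   mods = PrepareContainerMods(self.op.nics, _InstNicModPrivate)
#   ApplyContainerMods("NIC", instance.nics, changes, mods,
#                      _create, _modify, None)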
2543
2544
2545 def _UpdateIvNames(base_index, disks):
2546 """Updates the C{iv_name} attribute of disks.
2547
2548 @type disks: list of L{objects.Disk}
2549
2550 """
2551 for (idx, disk) in enumerate(disks):
2552 disk.iv_name = "disk/%s" % (base_index + idx, )
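
# Illustrative sketch (not part of the upstream code): renumbering disks from
# a given base index; here the disks after the first one get the iv_names
# "disk/1", "disk/2", ... (hypothetical call):
#
#   _UpdateIvNames(1, instance.disks[1:])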
2553
2554
2555 class LUInstanceSetParams(LogicalUnit):
2556   """Modifies an instance's parameters.
2557
2558 """
2559 HPATH = "instance-modify"
2560 HTYPE = constants.HTYPE_INSTANCE
2561 REQ_BGL = False
2562
2563 @staticmethod
2564 def _UpgradeDiskNicMods(kind, mods, verify_fn):
2565 assert ht.TList(mods)
2566 assert not mods or len(mods[0]) in (2, 3)
2567
2568 if mods and len(mods[0]) == 2:
2569 result = []
2570
2571 addremove = 0
2572 for op, params in mods:
2573 if op in (constants.DDM_ADD, constants.DDM_REMOVE):
2574 result.append((op, -1, params))
2575 addremove += 1
2576
2577 if addremove > 1:
2578 raise errors.OpPrereqError("Only one %s add or remove operation is"
2579 " supported at a time" % kind,
2580 errors.ECODE_INVAL)
2581 else:
2582 result.append((constants.DDM_MODIFY, op, params))
2583
2584 assert verify_fn(result)
2585 else:
2586 result = mods
2587
2588 return result
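
  # Illustrative sketch (not part of the upstream code): old-style 2-tuple
  # modifications are upgraded to the internal 3-tuple form, e.g.
  # (hypothetical values):
  #
  #   [(constants.DDM_ADD, {...}), ("2", {...})]
  #     becomes
  #   [(constants.DDM_ADD, -1, {...}), (constants.DDM_MODIFY, "2", {...})]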
2589
2590 @staticmethod
2591 def _CheckMods(kind, mods, key_types, item_fn):
2592 """Ensures requested disk/NIC modifications are valid.
2593
2594 """
2595 for (op, _, params) in mods:
2596 assert ht.TDict(params)
2597
2598 # If 'key_types' is an empty dict, we assume we have an
2599 # 'ext' template and thus do not ForceDictType
2600 if key_types:
2601 utils.ForceDictType(params, key_types)
2602
2603 if op == constants.DDM_REMOVE:
2604 if params:
2605 raise errors.OpPrereqError("No settings should be passed when"
2606 " removing a %s" % kind,
2607 errors.ECODE_INVAL)
2608 elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
2609 item_fn(op, params)
2610 else:
2611 raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2612
2613 @staticmethod
2614 def _VerifyDiskModification(op, params):
2615 """Verifies a disk modification.
2616
2617 """
2618 if op == constants.DDM_ADD:
2619 mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
2620 if mode not in constants.DISK_ACCESS_SET:
2621 raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
2622 errors.ECODE_INVAL)
2623
2624 size = params.get(constants.IDISK_SIZE, None)
2625 if size is None:
2626 raise errors.OpPrereqError("Required disk parameter '%s' missing" %
2627 constants.IDISK_SIZE, errors.ECODE_INVAL)
2628
2629 try:
2630 size = int(size)
2631 except (TypeError, ValueError), err:
2632 raise errors.OpPrereqError("Invalid disk size parameter: %s" % err,
2633 errors.ECODE_INVAL)
2634
2635 params[constants.IDISK_SIZE] = size
2636 name = params.get(constants.IDISK_NAME, None)
2637 if name is not None and name.lower() == constants.VALUE_NONE:
2638 params[constants.IDISK_NAME] = None
2639
2640 elif op == constants.DDM_MODIFY:
2641 if constants.IDISK_SIZE in params:
2642 raise errors.OpPrereqError("Disk size change not possible, use"
2643 " grow-disk", errors.ECODE_INVAL)
2644 if len(params) > 2:
2645 raise errors.OpPrereqError("Disk modification doesn't support"
2646 " additional arbitrary parameters",
2647 errors.ECODE_INVAL)
2648 name = params.get(constants.IDISK_NAME, None)
2649 if name is not None and name.lower() == constants.VALUE_NONE:
2650 params[constants.IDISK_NAME] = None
2651
2652 @staticmethod
2653 def _VerifyNicModification(op, params):
2654 """Verifies a network interface modification.
2655
2656 """
2657 if op in (constants.DDM_ADD, constants.DDM_MODIFY):
2658 ip = params.get(constants.INIC_IP, None)
2659 name = params.get(constants.INIC_NAME, None)
2660 req_net = params.get(constants.INIC_NETWORK, None)
2661 link = params.get(constants.NIC_LINK, None)
2662 mode = params.get(constants.NIC_MODE, None)
2663 if name is not None and name.lower() == constants.VALUE_NONE:
2664 params[constants.INIC_NAME] = None
2665 if req_net is not None:
2666 if req_net.lower() == constants.VALUE_NONE:
2667 params[constants.INIC_NETWORK] = None
2668 req_net = None
2669 elif link is not None or mode is not None:
2670           raise errors.OpPrereqError("If a network is given, mode or link"
2671                                      " must not be given",
2672 errors.ECODE_INVAL)
2673
2674 if op == constants.DDM_ADD:
2675 macaddr = params.get(constants.INIC_MAC, None)
2676 if macaddr is None:
2677 params[constants.INIC_MAC] = constants.VALUE_AUTO
2678
2679 if ip is not None:
2680 if ip.lower() == constants.VALUE_NONE:
2681 params[constants.INIC_IP] = None
2682 else:
2683 if ip.lower() == constants.NIC_IP_POOL:
2684 if op == constants.DDM_ADD and req_net is None:
2685 raise errors.OpPrereqError("If ip=pool, parameter network"
2686 " cannot be none",
2687 errors.ECODE_INVAL)
2688 else:
2689 if not netutils.IPAddress.IsValid(ip):
2690 raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
2691 errors.ECODE_INVAL)
2692
2693 if constants.INIC_MAC in params:
2694 macaddr = params[constants.INIC_MAC]
2695 if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2696 macaddr = utils.NormalizeAndValidateMac(macaddr)
2697
2698 if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
2699 raise errors.OpPrereqError("'auto' is not a valid MAC address when"
2700 " modifying an existing NIC",
2701 errors.ECODE_INVAL)
2702
2703 def CheckArguments(self):
2704 if not (self.op.nics or self.op.disks or self.op.disk_template or
2705 self.op.hvparams or self.op.beparams or self.op.os_name or
2706 self.op.offline is not None or self.op.runtime_mem or
2707 self.op.pnode):
2708 raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
2709
2710 if self.op.hvparams:
2711 _CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
2712 "hypervisor", "instance", "cluster")
2713
2714 self.op.disks = self._UpgradeDiskNicMods(
2715 "disk", self.op.disks, opcodes.OpInstanceSetParams.TestDiskModifications)
2716 self.op.nics = self._UpgradeDiskNicMods(
2717 "NIC", self.op.nics, opcodes.OpInstanceSetParams.TestNicModifications)
2718
2719 if self.op.disks and self.op.disk_template is not None:
2720 raise errors.OpPrereqError("Disk template conversion and other disk"
2721 " changes not supported at the same time",
2722 errors.ECODE_INVAL)
2723
2724 if (self.op.disk_template and
2725 self.op.disk_template in constants.DTS_INT_MIRROR and
2726 self.op.remote_node is None):
2727 raise errors.OpPrereqError("Changing the disk template to a mirrored"
2728 " one requires specifying a secondary node",
2729 errors.ECODE_INVAL)
2730
2731 # Check NIC modifications
2732 self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
2733 self._VerifyNicModification)
2734
2735 if self.op.pnode:
2736 self.op.pnode = _ExpandNodeName(self.cfg, self.op.pnode)
2737
2738 def ExpandNames(self):
2739 self._ExpandAndLockInstance()
2740 self.needed_locks[locking.LEVEL_NODEGROUP] = []
2741 # Can't even acquire node locks in shared mode as upcoming changes in
2742 # Ganeti 2.6 will start to modify the node object on disk conversion
2743 self.needed_locks[locking.LEVEL_NODE] = []
2744 self.needed_locks[locking.LEVEL_NODE_RES] = []
2745 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2746     # Lock the node group to look up the ipolicy
2747 self.share_locks[locking.LEVEL_NODEGROUP] = 1
2748
2749 def DeclareLocks(self, level):
2750 if level == locking.LEVEL_NODEGROUP:
2751 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
2752 # Acquire locks for the instance's nodegroups optimistically. Needs
2753 # to be verified in CheckPrereq
2754 self.needed_locks[locking.LEVEL_NODEGROUP] = \
2755 self.cfg.GetInstanceNodeGroups(self.op.instance_name)
2756 elif level == locking.LEVEL_NODE:
2757 self._LockInstancesNodes()
2758 if self.op.disk_template and self.op.remote_node:
2759 self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
2760 self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node)
2761 elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
2762 # Copy node locks
2763 self.needed_locks[locking.LEVEL_NODE_RES] = \
2764 _CopyLockList(self.needed_locks[locking.LEVEL_NODE])
2765
2766 def BuildHooksEnv(self):
2767 """Build hooks env.
2768
2769 This runs on the master, primary and secondaries.
2770
2771 """
2772 args = {}
2773 if constants.BE_MINMEM in self.be_new:
2774 args["minmem"] = self.be_new[constants.BE_MINMEM]
2775 if constants.BE_MAXMEM in self.be_new:
2776 args["maxmem"] = self.be_new[constants.BE_MAXMEM]
2777 if constants.BE_VCPUS in self.be_new:
2778 args["vcpus"] = self.be_new[constants.BE_VCPUS]
2779 # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
2780 # information at all.
2781
2782 if self._new_nics is not None:
2783 nics = []
2784
2785 for nic in self._new_nics:
2786 n = copy.deepcopy(nic)
2787 nicparams = self.cluster.SimpleFillNIC(n.nicparams)
2788 n.nicparams = nicparams
2789 nics.append(_NICToTuple(self, n))
2790
2791 args["nics"] = nics
2792
2793 env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
2794 if self.op.disk_template:
2795 env["NEW_DISK_TEMPLATE"] = self.op.disk_template
2796 if self.op.runtime_mem:
2797 env["RUNTIME_MEMORY"] = self.op.runtime_mem
2798
2799 return env
2800
2801 def BuildHooksNodes(self):
2802 """Build hooks nodes.
2803
2804 """
2805 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2806 return (nl, nl)
2807
2808 def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
2809 old_params, cluster, pnode):
2810
2811 update_params_dict = dict([(key, params[key])
2812 for key in constants.NICS_PARAMETERS
2813 if key in params])
2814
2815 req_link = update_params_dict.get(constants.NIC_LINK, None)
2816 req_mode = update_params_dict.get(constants.NIC_MODE, None)
2817
2818 new_net_uuid = None
2819 new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
2820 if new_net_uuid_or_name:
2821 new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
2822 new_net_obj = self.cfg.GetNetwork(new_net_uuid)
2823
2824 if old_net_uuid:
2825 old_net_obj = self.cfg.GetNetwork(old_net_uuid)
2826
2827 if new_net_uuid:
2828 netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode)
2829 if not netparams:
2830 raise errors.OpPrereqError("No netparams found for the network"
2831 " %s, probably not connected" %
2832 new_net_obj.name, errors.ECODE_INVAL)
2833 new_params = dict(netparams)
2834 else:
2835 new_params = _GetUpdatedParams(old_params, update_params_dict)
2836
2837 utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)
2838
2839 new_filled_params = cluster.SimpleFillNIC(new_params)
2840 objects.NIC.CheckParameterSyntax(new_filled_params)
2841
2842 new_mode = new_filled_params[constants.NIC_MODE]
2843 if new_mode == constants.NIC_MODE_BRIDGED:
2844 bridge = new_filled_params[constants.NIC_LINK]
2845 msg = self.rpc.call_bridges_exist(pnode, [bridge]).fail_msg
2846 if msg:
2847 msg = "Error checking bridges on node '%s': %s" % (pnode, msg)
2848 if self.op.force:
2849 self.warn.append(msg)
2850 else:
2851 raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
2852
2853 elif new_mode == constants.NIC_MODE_ROUTED:
2854 ip = params.get(constants.INIC_IP, old_ip)
2855 if ip is None:
2856 raise errors.OpPrereqError("Cannot set the NIC IP address to None"
2857 " on a routed NIC", errors.ECODE_INVAL)
2858
2859 elif new_mode == constants.NIC_MODE_OVS:
2860 # TODO: check OVS link
2861 self.LogInfo("OVS links are currently not checked for correctness")
2862
2863 if constants.INIC_MAC in params:
2864 mac = params[constants.INIC_MAC]
2865 if mac is None:
2866 raise errors.OpPrereqError("Cannot unset the NIC MAC address",
2867 errors.ECODE_INVAL)
2868 elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2869 # otherwise generate the MAC address
2870 params[constants.INIC_MAC] = \
2871 self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2872 else:
2873 # or validate/reserve the current one
2874 try:
2875 self.cfg.ReserveMAC(mac, self.proc.GetECId())
2876 except errors.ReservationError:
2877 raise errors.OpPrereqError("MAC address '%s' already in use"
2878 " in cluster" % mac,
2879 errors.ECODE_NOTUNIQUE)
2880 elif new_net_uuid != old_net_uuid:
2881
2882 def get_net_prefix(net_uuid):
2883 mac_prefix = None
2884 if net_uuid:
2885 nobj = self.cfg.GetNetwork(net_uuid)
2886 mac_prefix = nobj.mac_prefix
2887
2888 return mac_prefix
2889
2890 new_prefix = get_net_prefix(new_net_uuid)
2891 old_prefix = get_net_prefix(old_net_uuid)
2892 if old_prefix != new_prefix:
2893 params[constants.INIC_MAC] = \
2894 self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2895
2896 # if there is a change in (ip, network) tuple
2897 new_ip = params.get(constants.INIC_IP, old_ip)
2898 if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
2899 if new_ip:
2900 # if IP is pool then require a network and generate one IP
2901 if new_ip.lower() == constants.NIC_IP_POOL:
2902 if new_net_uuid:
2903 try:
2904 new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
2905 except errors.ReservationError:
2906 raise errors.OpPrereqError("Unable to get a free IP"
2907 " from the address pool",
2908 errors.ECODE_STATE)
2909 self.LogInfo("Chose IP %s from network %s",
2910 new_ip,
2911 new_net_obj.name)
2912 params[constants.INIC_IP] = new_ip
2913 else:
2914 raise errors.OpPrereqError("ip=pool, but no network found",
2915 errors.ECODE_INVAL)
2916         # otherwise, reserve the new IP in the new network, if any
2917 elif new_net_uuid:
2918 try:
2919 self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId())
2920 self.LogInfo("Reserving IP %s in network %s",
2921 new_ip, new_net_obj.name)
2922 except errors.ReservationError:
2923 raise errors.OpPrereqError("IP %s not available in network %s" %
2924 (new_ip, new_net_obj.name),
2925 errors.ECODE_NOTUNIQUE)
2926 # new network is None so check if new IP is a conflicting IP
2927 elif self.op.conflicts_check:
2928 _CheckForConflictingIp(self, new_ip, pnode)
2929
2930 # release old IP if old network is not None
2931 if old_ip and old_net_uuid:
2932 try:
2933 self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
2934 except errors.AddressPoolError:
2935 logging.warning("Release IP %s not contained in network %s",
2936 old_ip, old_net_obj.name)
2937
2938 # there are no changes in (ip, network) tuple and old network is not None
2939 elif (old_net_uuid is not None and
2940 (req_link is not None or req_mode is not None)):
2941 raise errors.OpPrereqError("Not allowed to change link or mode of"
2942 " a NIC that is connected to a network",
2943 errors.ECODE_INVAL)
2944
2945 private.params = new_params
2946 private.filled = new_filled_params
2947
2948 def _PreCheckDiskTemplate(self, pnode_info):
2949 """CheckPrereq checks related to a new disk template."""
2950 # Arguments are passed to avoid configuration lookups
2951 instance = self.instance
2952 pnode = instance.primary_node
2953 cluster = self.cluster
2954 if instance.disk_template == self.op.disk_template:
2955 raise errors.OpPrereqError("Instance already has disk template %s" %
2956 instance.disk_template, errors.ECODE_INVAL)
2957
2958 if (instance.disk_template,
2959 self.op.disk_template) not in self._DISK_CONVERSIONS:
2960 raise errors.OpPrereqError("Unsupported disk template conversion from"
2961 " %s to %s" % (instance.disk_template,
2962 self.op.disk_template),
2963 errors.ECODE_INVAL)
2964 _CheckInstanceState(self, instance, INSTANCE_DOWN,
2965 msg="cannot change disk template")
2966 if self.op.disk_template in constants.DTS_INT_MIRROR:
2967 if self.op.remote_node == pnode:
2968 raise errors.OpPrereqError("Given new secondary node %s is the same"
2969 " as the primary node of the instance" %
2970 self.op.remote_node, errors.ECODE_STATE)
2971 _CheckNodeOnline(self, self.op.remote_node)
2972 _CheckNodeNotDrained(self, self.op.remote_node)
2973 # FIXME: here we assume that the old instance type is DT_PLAIN
2974 assert instance.disk_template == constants.DT_PLAIN
2975 disks = [{constants.IDISK_SIZE: d.size,
2976 constants.IDISK_VG: d.logical_id[0]}
2977 for d in instance.disks]
2978 required = _ComputeDiskSizePerVG(self.op.disk_template, disks)
2979 _CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)
2980
2981 snode_info = self.cfg.GetNodeInfo(self.op.remote_node)
2982 snode_group = self.cfg.GetNodeGroup(snode_info.group)
2983 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
2984 snode_group)
2985 _CheckTargetNodeIPolicy(self, ipolicy, instance, snode_info, self.cfg,
2986 ignore=self.op.ignore_ipolicy)
2987 if pnode_info.group != snode_info.group:
2988 self.LogWarning("The primary and secondary nodes are in two"
2989 " different node groups; the disk parameters"
2990 " from the first disk's node group will be"
2991 " used")
2992
2993     if self.op.disk_template not in constants.DTS_EXCL_STORAGE:
2994 # Make sure none of the nodes require exclusive storage
2995 nodes = [pnode_info]
2996 if self.op.disk_template in constants.DTS_INT_MIRROR:
2997 assert snode_info
2998 nodes.append(snode_info)
2999 has_es = lambda n: _IsExclusiveStorageEnabledNode(self.cfg, n)
3000 if compat.any(map(has_es, nodes)):
3001 errmsg = ("Cannot convert disk template from %s to %s when exclusive"
3002 " storage is enabled" % (instance.disk_template,
3003 self.op.disk_template))
3004 raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)
3005
3006 def CheckPrereq(self):
3007 """Check prerequisites.
3008
3009 This only checks the instance list against the existing names.
3010
3011 """
3012 assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
3013 instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3014
3015 cluster = self.cluster = self.cfg.GetClusterInfo()
3016 assert self.instance is not None, \
3017 "Cannot retrieve locked instance %s" % self.op.instance_name
3018
3019 pnode = instance.primary_node
3020
3021 self.warn = []
3022
3023 if (self.op.pnode is not None and self.op.pnode != pnode and
3024 not self.op.force):
3025 # verify that the instance is not up
3026 instance_info = self.rpc.call_instance_info(pnode, instance.name,
3027 instance.hypervisor)
3028 if instance_info.fail_msg:
3029 self.warn.append("Can't get instance runtime information: %s" %
3030 instance_info.fail_msg)
3031 elif instance_info.payload:
3032 raise errors.OpPrereqError("Instance is still running on %s" % pnode,
3033 errors.ECODE_STATE)
3034
3035 assert pnode in self.owned_locks(locking.LEVEL_NODE)
3036 nodelist = list(instance.all_nodes)
3037 pnode_info = self.cfg.GetNodeInfo(pnode)
3038 self.diskparams = self.cfg.GetInstanceDiskParams(instance)
3039
3040 #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
3041 assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
3042 group_info = self.cfg.GetNodeGroup(pnode_info.group)
3043
3044 # dictionary with instance information after the modification
3045 ispec = {}
3046
3047 # Check disk modifications. This is done here and not in CheckArguments
3048 # (as with NICs), because we need to know the instance's disk template
3049 if instance.disk_template == constants.DT_EXT:
3050 self._CheckMods("disk", self.op.disks, {},
3051 self._VerifyDiskModification)
3052 else:
3053 self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
3054 self._VerifyDiskModification)
3055
3056 # Prepare disk/NIC modifications
3057 self.diskmod = PrepareContainerMods(self.op.disks, None)
3058 self.nicmod = PrepareContainerMods(self.op.nics, _InstNicModPrivate)
3059
3060 # Check the validity of the `provider' parameter
3061     if instance.disk_template == constants.DT_EXT:
3062 for mod in self.diskmod:
3063 ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
3064 if mod[0] == constants.DDM_ADD:
3065 if ext_provider is None:
3066 raise errors.OpPrereqError("Instance template is '%s' and parameter"
3067 " '%s' missing, during disk add" %
3068 (constants.DT_EXT,
3069 constants.IDISK_PROVIDER),
3070 errors.ECODE_NOENT)
3071 elif mod[0] == constants.DDM_MODIFY:
3072 if ext_provider:
3073 raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
3074 " modification" %
3075 constants.IDISK_PROVIDER,
3076 errors.ECODE_INVAL)
3077 else:
3078 for mod in self.diskmod:
3079 ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
3080 if ext_provider is not None:
3081 raise errors.OpPrereqError("Parameter '%s' is only valid for"
3082 " instances of type '%s'" %
3083 (constants.IDISK_PROVIDER,
3084 constants.DT_EXT),
3085 errors.ECODE_INVAL)
3086
3087 # OS change
3088 if self.op.os_name and not self.op.force:
3089 _CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
3090 self.op.force_variant)
3091 instance_os = self.op.os_name
3092 else:
3093 instance_os = instance.os
3094
3095 assert not (self.op.disk_template and self.op.disks), \
3096 "Can't modify disk template and apply disk changes at the same time"
3097
3098 if self.op.disk_template:
3099 self._PreCheckDiskTemplate(pnode_info)
3100
3101 # hvparams processing
3102 if self.op.hvparams:
3103 hv_type = instance.hypervisor
3104 i_hvdict = _GetUpdatedParams(instance.hvparams, self.op.hvparams)
3105 utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
3106 hv_new = cluster.SimpleFillHV(hv_type, instance.os, i_hvdict)
3107
3108 # local check
3109 hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
3110 _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
3111 self.hv_proposed = self.hv_new = hv_new # the new actual values
3112 self.hv_inst = i_hvdict # the new dict (without defaults)
3113 else:
3114 self.hv_proposed = cluster.SimpleFillHV(instance.hypervisor, instance.os,
3115 instance.hvparams)
3116 self.hv_new = self.hv_inst = {}
3117
3118 # beparams processing
3119 if self.op.beparams:
3120 i_bedict = _GetUpdatedParams(instance.beparams, self.op.beparams,
3121 use_none=True)
3122 objects.UpgradeBeParams(i_bedict)
3123 utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
3124 be_new = cluster.SimpleFillBE(i_bedict)
3125 self.be_proposed = self.be_new = be_new # the new actual values
3126 self.be_inst = i_bedict # the new dict (without defaults)
3127 else:
3128 self.be_new = self.be_inst = {}
3129 self.be_proposed = cluster.SimpleFillBE(instance.beparams)
3130 be_old = cluster.FillBE(instance)
3131
3132 # CPU param validation -- checking every time a parameter is
3133 # changed to cover all cases where either CPU mask or vcpus have
3134 # changed
3135 if (constants.BE_VCPUS in self.be_proposed and
3136 constants.HV_CPU_MASK in self.hv_proposed):
3137 cpu_list = \
3138 utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
3139 # Verify mask is consistent with number of vCPUs. Can skip this
3140 # test if only 1 entry in the CPU mask, which means same mask
3141 # is applied to all vCPUs.
3142 if (len(cpu_list) > 1 and
3143 len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
3144 raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
3145 " CPU mask [%s]" %
3146 (self.be_proposed[constants.BE_VCPUS],
3147 self.hv_proposed[constants.HV_CPU_MASK]),
3148 errors.ECODE_INVAL)
3149
3150 # Only perform this test if a new CPU mask is given
3151 if constants.HV_CPU_MASK in self.hv_new:
3152 # Calculate the largest CPU number requested
3153 max_requested_cpu = max(map(max, cpu_list))
3154 # Check that all of the instance's nodes have enough physical CPUs to
3155 # satisfy the requested CPU mask
3156 _CheckNodesPhysicalCPUs(self, instance.all_nodes,
3157 max_requested_cpu + 1, instance.hypervisor)
3158
3159 # osparams processing
3160 if self.op.osparams:
3161 i_osdict = _GetUpdatedParams(instance.osparams, self.op.osparams)
3162 _CheckOSParams(self, True, nodelist, instance_os, i_osdict)
3163 self.os_inst = i_osdict # the new dict (without defaults)
3164 else:
3165 self.os_inst = {}
3166
3167 #TODO(dynmem): do the appropriate check involving MINMEM
3168 if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
3169 be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
3170 mem_check_list = [pnode]
3171 if be_new[constants.BE_AUTO_BALANCE]:
3172 # either we changed auto_balance to yes or it was from before
3173 mem_check_list.extend(instance.secondary_nodes)
3174 instance_info = self.rpc.call_instance_info(pnode, instance.name,
3175 instance.hypervisor)
3176 nodeinfo = self.rpc.call_node_info(mem_check_list, None,
3177 [instance.hypervisor], False)
3178 pninfo = nodeinfo[pnode]
3179 msg = pninfo.fail_msg
3180 if msg:
3181 # Assume the primary node is unreachable and go ahead
3182 self.warn.append("Can't get info from primary node %s: %s" %
3183 (pnode, msg))
3184 else:
3185 (_, _, (pnhvinfo, )) = pninfo.payload
3186 if not isinstance(pnhvinfo.get("memory_free", None), int):
3187 self.warn.append("Node data from primary node %s doesn't contain"
3188 " free memory information" % pnode)
3189 elif instance_info.fail_msg:
3190 self.warn.append("Can't get instance runtime information: %s" %
3191 instance_info.fail_msg)
3192 else:
3193 if instance_info.payload:
3194 current_mem = int(instance_info.payload["memory"])
3195 else:
3196 # Assume instance not running
3197 # (there is a slight race condition here, but it's not very
3198 # probable, and we have no other way to check)
3199 # TODO: Describe race condition
3200 current_mem = 0
3201 #TODO(dynmem): do the appropriate check involving MINMEM
3202 miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
3203 pnhvinfo["memory_free"])
3204 if miss_mem > 0:
3205 raise errors.OpPrereqError("This change will prevent the instance"
3206 " from starting, due to %d MB of memory"
3207 " missing on its primary node" %
3208 miss_mem, errors.ECODE_NORES)
3209
3210 if be_new[constants.BE_AUTO_BALANCE]:
3211 for node, nres in nodeinfo.items():
3212 if node not in instance.secondary_nodes:
3213 continue
3214 nres.Raise("Can't get info from secondary node %s" % node,
3215 prereq=True, ecode=errors.ECODE_STATE)
3216 (_, _, (nhvinfo, )) = nres.payload
3217 if not isinstance(nhvinfo.get("memory_free", None), int):
3218 raise errors.OpPrereqError("Secondary node %s didn't return free"
3219 " memory information" % node,
3220 errors.ECODE_STATE)
3221 #TODO(dynmem): do the appropriate check involving MINMEM
3222 elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
3223 raise errors.OpPrereqError("This change will prevent the instance"
3224 " from failover to its secondary node"
3225 " %s, due to not enough memory" % node,
3226 errors.ECODE_STATE)
3227
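# Runtime memory changes (ballooning) are only possible while the
# instance is running, and the requested value must stay within the
# proposed MINMEM/MAXMEM limits unless --force is given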
3228 if self.op.runtime_mem:
3229 remote_info = self.rpc.call_instance_info(instance.primary_node,
3230 instance.name,
3231 instance.hypervisor)
3232 remote_info.Raise("Error checking node %s" % instance.primary_node)
3233 if not remote_info.payload: # not running already
3234 raise errors.OpPrereqError("Instance %s is not running" %
3235 instance.name, errors.ECODE_STATE)
3236
3237 current_memory = remote_info.payload["memory"]
3238 if (not self.op.force and
3239 (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
3240 self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
3241 raise errors.OpPrereqError("Instance %s must have between %d"
3242 " and %d MB of memory unless --force is"
3243 " given" %
3244 (instance.name,
3245 self.be_proposed[constants.BE_MINMEM],
3246 self.be_proposed[constants.BE_MAXMEM]),
3247 errors.ECODE_INVAL)
3248
3249 delta = self.op.runtime_mem - current_memory
3250 if delta > 0:
3251 _CheckNodeFreeMemory(self, instance.primary_node,
3252 "ballooning memory for instance %s" %
3253 instance.name, delta, instance.hypervisor)
3254
3255 if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
3256 raise errors.OpPrereqError("Disk operations not supported for"
3257 " diskless instances", errors.ECODE_INVAL)
3258
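# Callbacks for ApplyContainerMods below; at this prereq stage they only
# validate the requested NIC additions and modifications on a copy,
# while a removal releases the NIC's IP address back to its network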
3259 def _PrepareNicCreate(_, params, private):
3260 self._PrepareNicModification(params, private, None, None,
3261 {}, cluster, pnode)
3262 return (None, None)
3263
3264 def _PrepareNicMod(_, nic, params, private):
3265 self._PrepareNicModification(params, private, nic.ip, nic.network,
3266 nic.nicparams, cluster, pnode)
3267 return None
3268
3269 def _PrepareNicRemove(_, params, __):
3270 ip = params.ip
3271 net = params.network
3272 if net is not None and ip is not None:
3273 self.cfg.ReleaseIp(net, ip, self.proc.GetECId())
3274
3275 # Verify NIC changes (operating on copy)
3276 nics = instance.nics[:]
3277 ApplyContainerMods("NIC", nics, None, self.nicmod,
3278 _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
3279 if len(nics) > constants.MAX_NICS:
3280 raise errors.OpPrereqError("Instance has too many network interfaces"
3281 " (%d), cannot add more" % constants.MAX_NICS,
3282 errors.ECODE_STATE)
3283
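# For disks, only the (optional) name is applied to the verification
# copy; it is used below to check device-name uniqueness and the total
# disk count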
3284 def _PrepareDiskMod(_, disk, params, __):
3285 disk.name = params.get(constants.IDISK_NAME, None)
3286
3287 # Verify disk changes (operating on a copy)
3288 disks = copy.deepcopy(instance.disks)
3289 ApplyContainerMods("disk", disks, None, self.diskmod, None, _PrepareDiskMod,
3290 None)
3291 utils.ValidateDeviceNames("disk", disks)
3292 if len(disks) > constants.MAX_DISKS:
3293 raise errors.OpPrereqError("Instance has too many disks (%d); the"
3294 " maximum is %d" % (len(disks), constants.MAX_DISKS),
3295 errors.ECODE_STATE)
3296 disk_sizes = [disk.size for disk in instance.disks]
3297 disk_sizes.extend(params["size"] for (op, idx, params, private) in
3298 self.diskmod if op == constants.DDM_ADD)
3299 ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
3300 ispec[constants.ISPEC_DISK_SIZE] = disk_sizes
3301
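# Switching the instance to offline is only allowed from a restricted
# set of administrative states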
3302 if self.op.offline is not None and self.op.offline:
3303 _CheckInstanceState(self, instance, CAN_CHANGE_INSTANCE_OFFLINE,
3304 msg="can't change to offline")
3305
3306 # Pre-compute NIC changes (necessary to use result in hooks)
3307 self._nic_chgdesc = []
3308 if self.nicmod:
3309 # Operate on copies as this is still in prereq
3310 nics = [nic.Copy() for nic in instance.nics]
3311 ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
3312 self._CreateNewNic, self._ApplyNicMods, None)
3313 # Verify that NIC names are unique and valid
3314 utils.ValidateDeviceNames("NIC", nics)
3315 self._new_nics = nics
3316 ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
3317 else:
3318 self._new_nics = None
3319 ispec[constants.ISPEC_NIC_COUNT] = len(instance.nics)
3320
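# Unless explicitly ignored, verify that the modified instance still
# fits the instance policy of its node group, once with the proposed
# maximum and once with the proposed minimum memory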
3321 if not self.op.ignore_ipolicy:
3322 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
3323 group_info)
3324
3325 # Fill ispec with backend parameters
3326 ispec[constants.ISPEC_SPINDLE_USE] = \
3327 self.be_new.get(constants.BE_SPINDLE_USE, None)
3328 ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
3329 None)
3330
3331 # Copy ispec to verify parameters with min/max values separately
3332 if self.op.disk_template:
3333 new_disk_template = self.op.disk_template
3334 else:
3335 new_disk_template = instance.disk_template
3336 ispec_max = ispec.copy()
3337 ispec_max[constants.ISPEC_MEM_SIZE] = \
3338 self.be_new.get(constants.BE_MAXMEM, None)
3339 res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
3340 new_disk_template)
3341 ispec_min = ispec.copy()
3342 ispec_min[constants.ISPEC_MEM_SIZE] = \
3343 self.be_new.get(constants.BE_MINMEM, None)
3344 res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
3345 new_disk_template)
3346
3347 if (res_max or res_min):
3348 # FIXME: Improve error message by including information about whether
3349 # the upper or lower limit of the parameter fails the ipolicy.
3350 msg = ("Instance allocation to group %s (%s) violates policy: %s" %
3351 (group_info, group_info.name,
3352 utils.CommaJoin(set(res_max + res_min))))
3353 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
3354
3355 def _ConvertPlainToDrbd(self, feedback_fn):
3356 """Converts an instance from plain to drbd.
3357
3358 """
3359 feedback_fn("Converting template to drbd")
3360 instance = self.instance
3361 pnode = instance.primary_node
3362 snode = self.op.remote_node
3363
3364 assert instance.disk_template == constants.DT_PLAIN
3365
3366 # create a fake disk info for _GenerateDiskTemplate
3367 disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
3368 constants.IDISK_VG: d.logical_id[0],
3369 constants.IDISK_NAME: d.name}
3370 for d in instance.disks]
3371 new_disks = _GenerateDiskTemplate(self, self.op.disk_template,
3372 instance.name, pnode, [snode],
3373 disk_info, None, None, 0, feedback_fn,
3374 self.diskparams)
3375 anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
3376 self.diskparams)
3377 p_excl_stor = _IsExclusiveStorageEnabledNodeName(self.cfg, pnode)
3378 s_excl_stor = _IsExclusiveStorageEnabledNodeName(self.cfg, snode)
3379 info = _GetInstanceInfoText(instance)
3380 feedback_fn("Creating additional volumes...")
3381 # first, create the missing data and meta devices
3382 for disk in anno_disks:
3383 # unfortunately this is... not too nice
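# on the primary node only the DRBD meta volume (children[1]) has to be
# created, since the existing plain LV will be reused as the data
# volume; the secondary node needs both data and meta volumes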
3384 _CreateSingleBlockDev(self, pnode, instance, disk.children[1],
3385 info, True, p_excl_stor)
3386 for child in disk.children:
3387 _CreateSingleBlockDev(self, snode, instance, child, info, True,
3388 s_excl_stor)
3389 # at this stage, all new LVs have been created, we can rename the
3390 # old ones
3391 feedback_fn("Renaming original volumes...")
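# map each original plain LV to the logical_id expected by the
# corresponding new DRBD data child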
3392 rename_list = [(o, n.children[0].logical_id)
3393 for (o, n) in zip(instance.disks, new_disks)]
3394 result = self.rpc.call_blockdev_rename(pnode, rename_list)
3395 result.Raise("Failed to rename original LVs")
3396
3397 feedback_fn("Initializing DRBD devices...")
3398 # all child devices are in place, we can now create the DRBD devices
3399 try:
3400 for disk in anno_disks:
3401 for (node, excl_stor) in [(pnode, p_excl_stor), (snode, s_excl_stor)]:
3402 f_create = node == pnode
3403 _CreateSingleBlockDev(self, node, instance, disk, info, f_create,
3404 excl_stor)
3405 except errors.GenericError as e:
3406 feedback_fn("Initializing of DRBD devices failed;"
3407 " renaming back original volumes...")
3408 for disk in new_disks:
3409 self.cfg.SetDiskID(disk, pnode)
3410 rename_back_list = [(n.children[0], o.logical_id)
3411 for (n, o) in zip(new_disks, instance.disks)]
3412 result = self.rpc.call_blockdev_rename(pnode, rename_back_list)
3413 result.Raise("Failed to rename LVs back after error %s" % str(e))
3414 raise
3415
3416 # at this point, the instance has been modified
3417 instance.disk_template = constants.DT_DRBD8
3418 instance.disks = new_disks
3419 self.cfg.Update(instance, feedback_fn)
3420
3421 #