Make QA turn on user-shutdown when testing it
[ganeti-github.git] / qa / qa_instance.py
1 #
2 #
3
4 # Copyright (C) 2007, 2011, 2012, 2013 Google Inc.
5 # All rights reserved.
6 #
7 # Redistribution and use in source and binary forms, with or without
8 # modification, are permitted provided that the following conditions are
9 # met:
10 #
11 # 1. Redistributions of source code must retain the above copyright notice,
12 # this list of conditions and the following disclaimer.
13 #
14 # 2. Redistributions in binary form must reproduce the above copyright
15 # notice, this list of conditions and the following disclaimer in the
16 # documentation and/or other materials provided with the distribution.
17 #
18 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
19 # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
20 # TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
21 # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
22 # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
23 # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
24 # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
25 # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
26 # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
27 # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28 # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
30
31 """Instance related QA tests.
32
33 """
34
35 import os
36 import re
37 import time
38
39 from ganeti import utils
40 from ganeti import constants
41 from ganeti import pathutils
42 from ganeti import query
43 from ganeti.netutils import IP4Address
44
45 import qa_config
46 import qa_daemon
47 import qa_utils
48 import qa_error
49
50 from qa_utils import AssertCommand, AssertEqual, AssertIn
51 from qa_utils import InstanceCheck, INST_DOWN, INST_UP, FIRST_ARG, RETURN_VALUE
52 from qa_instance_utils import CheckSsconfInstanceList, \
53 CreateInstanceDrbd8, \
54 CreateInstanceByDiskTemplate, \
55 CreateInstanceByDiskTemplateOneNode, \
56 GetGenericAddParameters
57
58
59 def _GetDiskStatePath(disk):
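# Illustrative usage (hypothetical device name): _GetDiskStatePath("sdb")
# returns "/sys/block/sdb/device/state".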
60 return "/sys/block/%s/device/state" % disk
61
62
63 def GetInstanceInfo(instance):
64 """Return information about the actual state of an instance.
65
66 @type instance: string
67 @param instance: the instance name
68 @return: a dictionary with the following keys:
69 - "nodes": instance nodes, a list of strings
70 - "volumes": instance volume IDs, a list of strings
71 - "drbd-minors": DRBD minors used by the instance, a dictionary where
72 keys are nodes, and values are lists of integers (or an empty
73 dictionary for non-DRBD instances)
74 - "disk-template": instance disk template
75 - "storage-type": storage type associated with the instance disk template
76
77 """
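# Illustrative example of the return value (all names and numbers below are
# hypothetical): for a DRBD-based instance this could look roughly like
# {"nodes": ["node1", "node2"],
# "volumes": ["xenvg/<uuid>.disk0_data", "xenvg/<uuid>.disk0_meta"],
# "drbd-minors": {"node1": [0], "node2": [0]},
# "disk-template": "drbd",
# "storage-type": "lvm-vg"}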
78 node_elem = r"([^,()]+)(?:\s+\([^)]+\))?"
79 # re_nodelist matches a list of nodes returned by gnt-instance info, e.g.:
80 # node1.fqdn
81 # node2.fqdn,node3.fqdn
82 # node4.fqdn (group mygroup, group UUID 01234567-abcd-0123-4567-0123456789ab)
83 # FIXME This works with no more than 2 secondaries
84 re_nodelist = re.compile(node_elem + "(?:," + node_elem + ")?$")
85
86 info = qa_utils.GetObjectInfo(["gnt-instance", "info", instance])[0]
87 nodes = []
88 for nodeinfo in info["Nodes"]:
89 if "primary" in nodeinfo:
90 nodes.append(nodeinfo["primary"])
91 elif "secondaries" in nodeinfo:
92 nodestr = nodeinfo["secondaries"]
93 if nodestr:
94 m = re_nodelist.match(nodestr)
95 if m:
96 nodes.extend(filter(None, m.groups()))
97 else:
98 nodes.append(nodestr)
99
100 disk_template = info["Disk template"]
101 if not disk_template:
102 raise qa_error.Error("Can't get instance disk template")
103 storage_type = constants.MAP_DISK_TEMPLATE_STORAGE_TYPE[disk_template]
104
105 re_drbdnode = re.compile(r"^([^\s,]+),\s+minor=([0-9]+)$")
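# re_drbdnode matches the per-node DRBD lines of the form "<node>, minor=<N>",
# e.g. (hypothetical node name) "node1.example.com, minor=0".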
106 vols = []
107 drbd_min = {}
108 for (count, diskinfo) in enumerate(info["Disks"]):
109 (dtype, _) = diskinfo["disk/%s" % count].split(",", 1)
110 if dtype == constants.DT_DRBD8:
111 for child in diskinfo["child devices"]:
112 vols.append(child["logical_id"])
113 for key in ["nodeA", "nodeB"]:
114 m = re_drbdnode.match(diskinfo[key])
115 if not m:
116 raise qa_error.Error("Cannot parse DRBD info: %s" % diskinfo[key])
117 node = m.group(1)
118 minor = int(m.group(2))
119 minorlist = drbd_min.setdefault(node, [])
120 minorlist.append(minor)
121 elif dtype == constants.DT_PLAIN:
122 vols.append(diskinfo["logical_id"])
123
124 assert nodes
125 assert len(nodes) < 2 or vols
126 return {
127 "nodes": nodes,
128 "volumes": vols,
129 "drbd-minors": drbd_min,
130 "disk-template": disk_template,
131 "storage-type": storage_type,
132 }
133
134
135 def _DestroyInstanceDisks(instance):
136 """Remove all the backend disks of an instance.
137
138 This is used to simulate HW errors (dead nodes, broken disks...); the
139 configuration of the instance is not affected.
140 @type instance: dictionary
141 @param instance: the instance
142
143 """
144 info = GetInstanceInfo(instance.name)
145 # FIXME: destruction/removal should be part of the disk class
146 if info["storage-type"] == constants.ST_LVM_VG:
147 vols = info["volumes"]
148 for node in info["nodes"]:
149 AssertCommand(["lvremove", "-f"] + vols, node=node)
150 elif info["storage-type"] in (constants.ST_FILE, constants.ST_SHARED_FILE):
151 # Note that this works for both file and sharedfile, and this is intended.
152 storage_dir = qa_config.get("file-storage-dir",
153 pathutils.DEFAULT_FILE_STORAGE_DIR)
154 idir = os.path.join(storage_dir, instance.name)
155 for node in info["nodes"]:
156 AssertCommand(["rm", "-rf", idir], node=node)
157 elif info["storage-type"] == constants.ST_DISKLESS:
158 pass
159
160
161 def _GetInstanceFields(instance, fields):
162 """Get the value of one or more fields of an instance.
163
164 @type instance: string
165 @param instance: instance name
166
167 @type fields: list of string
168 @param fields: names of the fields
169
170 @rtype: tuple of string
171 @return: values of the fields
172
173 """
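# Sketch of the behaviour (instance, field and output values are hypothetical):
# _GetInstanceFields("inst1", ["status", "oper_ram"]) runs
# gnt-instance list --no-headers --separator=: --units m -o status,oper_ram inst1
# on the master node and returns the colon-separated output, e.g.
# ("running", "128").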
174 master = qa_config.GetMasterNode()
175 infocmd = utils.ShellQuoteArgs(["gnt-instance", "list", "--no-headers",
176 "--separator=:", "--units", "m", "-o",
177 ",".join(fields), instance])
178 return tuple(qa_utils.GetCommandOutput(master.primary, infocmd)
179 .strip()
180 .split(":"))
181
182
183 def _GetInstanceField(instance, field):
184 """Get the value of a field of an instance.
185
186 @type instance: string
187 @param instance: Instance name
188 @type field: string
189 @param field: Name of the field
190 @rtype: string
191
192 """
193 return _GetInstanceFields(instance, [field])[0]
194
195
196 def _GetBoolInstanceField(instance, field):
197 """Get the Boolean value of a field of an instance.
198
199 @type instance: string
200 @param instance: Instance name
201 @type field: string
202 @param field: Name of the field
203 @rtype: bool
204
205 """
206 info_out = _GetInstanceField(instance, field)
207 if info_out == "Y":
208 return True
209 elif info_out == "N":
210 return False
211 else:
212 raise qa_error.Error("Field %s of instance %s has a non-Boolean value:"
213 " %s" % (field, instance, info_out))
214
215
216 def _GetNumInstanceField(instance, field):
217 """Get a numeric value of a field of an instance.
218
219 @type instance: string
220 @param instance: Instance name
221 @type field: string
222 @param field: Name of the field
223 @rtype: int or float
224
225 """
226 info_out = _GetInstanceField(instance, field)
227 try:
228 ret = int(info_out)
229 except ValueError:
230 try:
231 ret = float(info_out)
232 except ValueError:
233 raise qa_error.Error("Field %s of instance %s has a non-numeric value:"
234 " %s" % (field, instance, info_out))
235 return ret
236
237
238 def GetInstanceSpec(instance, spec):
239 """Return the current spec for the given parameter.
240
241 @type instance: string
242 @param instance: Instance name
243 @type spec: string
244 @param spec: one of the supported parameters: "memory-size", "cpu-count",
245 "disk-count", "disk-size", "nic-count"
246 @rtype: tuple
247 @return: (minspec, maxspec); minspec and maxspec can be different only for
248 memory and disk size
249
250 """
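# Worked example (values are hypothetical): GetInstanceSpec("inst1",
# "memory-size") reads the be/minmem and be/maxmem fields and could return
# (128, 256); "disk-size" first queries disk.count and then one disk.size/N
# field per disk, returning the smallest and largest disk size.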
251 specmap = {
252 "memory-size": ["be/minmem", "be/maxmem"],
253 "cpu-count": ["vcpus"],
254 "disk-count": ["disk.count"],
255 "disk-size": ["disk.size/ "],
256 "nic-count": ["nic.count"],
257 }
258 # For disks, first we need the number of disks
259 if spec == "disk-size":
260 (numdisk, _) = GetInstanceSpec(instance, "disk-count")
261 fields = ["disk.size/%s" % k for k in range(0, numdisk)]
262 else:
263 assert spec in specmap, "%s not in %s" % (spec, specmap)
264 fields = specmap[spec]
265 values = [_GetNumInstanceField(instance, f) for f in fields]
266 return (min(values), max(values))
267
268
269 def IsFailoverSupported(instance):
270 return instance.disk_template in constants.DTS_MIRRORED
271
272
273 def IsMigrationSupported(instance):
274 return instance.disk_template in constants.DTS_MIRRORED
275
276
277 def IsDiskReplacingSupported(instance):
278 return instance.disk_template == constants.DT_DRBD8
279
280
281 def IsDiskSupported(instance):
282 return instance.disk_template != constants.DT_DISKLESS
283
284
285 def TestInstanceAddWithPlainDisk(nodes, fail=False):
286 """gnt-instance add -t plain"""
287 if constants.DT_PLAIN in qa_config.GetEnabledDiskTemplates():
288 instance = CreateInstanceByDiskTemplateOneNode(nodes, constants.DT_PLAIN,
289 fail=fail)
290 if not fail:
291 qa_utils.RunInstanceCheck(instance, True)
292 return instance
293
294
295 @InstanceCheck(None, INST_UP, RETURN_VALUE)
296 def TestInstanceAddWithDrbdDisk(nodes):
297 """gnt-instance add -t drbd"""
298 if constants.DT_DRBD8 in qa_config.GetEnabledDiskTemplates():
299 return CreateInstanceDrbd8(nodes)
300
301
302 @InstanceCheck(None, INST_UP, RETURN_VALUE)
303 def TestInstanceAddFile(nodes):
304 """gnt-instance add -t file"""
305 assert len(nodes) == 1
306 if constants.DT_FILE in qa_config.GetEnabledDiskTemplates():
307 return CreateInstanceByDiskTemplateOneNode(nodes, constants.DT_FILE)
308
309
310 @InstanceCheck(None, INST_UP, RETURN_VALUE)
311 def TestInstanceAddSharedFile(nodes):
312 """gnt-instance add -t sharedfile"""
313 assert len(nodes) == 1
314 if constants.DT_SHARED_FILE in qa_config.GetEnabledDiskTemplates():
315 return CreateInstanceByDiskTemplateOneNode(nodes, constants.DT_SHARED_FILE)
316
317
318 @InstanceCheck(None, INST_UP, RETURN_VALUE)
319 def TestInstanceAddDiskless(nodes):
320 """gnt-instance add -t diskless"""
321 assert len(nodes) == 1
322 if constants.DT_DISKLESS in qa_config.GetEnabledDiskTemplates():
323 return CreateInstanceByDiskTemplateOneNode(nodes, constants.DT_DISKLESS)
324
325
326 @InstanceCheck(None, INST_DOWN, FIRST_ARG)
327 def TestInstanceRemove(instance):
328 """gnt-instance remove"""
329 AssertCommand(["gnt-instance", "remove", "-f", instance.name])
330
331
332 @InstanceCheck(INST_DOWN, INST_UP, FIRST_ARG)
333 def TestInstanceStartup(instance):
334 """gnt-instance startup"""
335 AssertCommand(["gnt-instance", "startup", instance.name])
336
337
338 @InstanceCheck(INST_UP, INST_DOWN, FIRST_ARG)
339 def TestInstanceShutdown(instance):
340 """gnt-instance shutdown"""
341 AssertCommand(["gnt-instance", "shutdown", instance.name])
342
343
344 @InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
345 def TestInstanceReboot(instance):
346 """gnt-instance reboot"""
347 options = qa_config.get("options", {})
348 reboot_types = options.get("reboot-types", constants.REBOOT_TYPES)
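# Unless overridden by the "reboot-types" QA option, this exercises every
# reboot type known to Ganeti (soft, hard and full at the time of writing).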
349 name = instance.name
350 for rtype in reboot_types:
351 AssertCommand(["gnt-instance", "reboot", "--type=%s" % rtype, name])
352
353 AssertCommand(["gnt-instance", "shutdown", name])
354 qa_utils.RunInstanceCheck(instance, False)
355 AssertCommand(["gnt-instance", "reboot", name])
356
357 master = qa_config.GetMasterNode()
358 cmd = ["gnt-instance", "list", "--no-headers", "-o", "status", name]
359 result_output = qa_utils.GetCommandOutput(master.primary,
360 utils.ShellQuoteArgs(cmd))
361 AssertEqual(result_output.strip(), constants.INSTST_RUNNING)
362
363
364 @InstanceCheck(INST_DOWN, INST_DOWN, FIRST_ARG)
365 def TestInstanceReinstall(instance):
366 """gnt-instance reinstall"""
367 if instance.disk_template == constants.DT_DISKLESS:
368 print qa_utils.FormatInfo("Test not supported for diskless instances")
369 return
370
371 qa_storage = qa_config.get("qa-storage")
372
373 if qa_storage is None:
374 print qa_utils.FormatInfo("Test not supported because the additional QA"
375 " storage is not available")
376 else:
377 # Reinstall with OS image from QA storage
378 url = "%s/busybox.img" % qa_storage
379 AssertCommand(["gnt-instance", "reinstall",
380 "--os-parameters", "os-image=" + url,
381 "-f", instance.name])
382
383 # Reinstall with OS image as local file on the node
384 pnode = _GetInstanceField(instance.name, "pnode")
385
386 cmd = ("wget -O busybox.img %s &> /dev/null &&"
387 " echo $(pwd)/busybox.img") % url
388 image = qa_utils.GetCommandOutput(pnode, cmd).strip()
389
390 AssertCommand(["gnt-instance", "reinstall",
391 "--os-parameters", "os-image=" + image,
392 "-f", instance.name])
393
394 # Reinstall with a non-existing local file
395 AssertCommand(["gnt-instance", "reinstall",
396 "--os-parameters", "os-image=NonExistantOsForQa",
397 "-f", instance.name], fail=True)
398
399 # Reinstall with a non-existing URL
400 AssertCommand(["gnt-instance", "reinstall",
401 "--os-parameters", "os-image=http://NonExistantOsForQa",
402 "-f", instance.name], fail=True)
403
404 # Reinstall using OS scripts
405 AssertCommand(["gnt-instance", "reinstall", "-f", instance.name])
406
407 # Test with a non-existent OS definition
408 AssertCommand(["gnt-instance", "reinstall", "-f",
409 "--os-type=NonExistantOsForQa",
410 instance.name],
411 fail=True)
412
413 # Test with existing OS but invalid variant
414 AssertCommand(["gnt-instance", "reinstall", "-f", "-o", "debootstrap+ola",
415 instance.name],
416 fail=True)
417
418 # Test with existing OS but invalid variant
419 AssertCommand(["gnt-instance", "reinstall", "-f", "-o", "debian-image+ola",
420 instance.name],
421 fail=True)
422
423
424 @InstanceCheck(INST_DOWN, INST_DOWN, FIRST_ARG)
425 def TestInstanceRenameAndBack(rename_source, rename_target):
426 """gnt-instance rename
427
428 This must leave the instance with the original name, not the target
429 name.
430
431 """
432 CheckSsconfInstanceList(rename_source)
433
434 # first do a rename to a different actual name, expecting it to fail
435 qa_utils.AddToEtcHosts(["meeeeh-not-exists", rename_target])
436 try:
437 AssertCommand(["gnt-instance", "rename", rename_source, rename_target],
438 fail=True)
439 CheckSsconfInstanceList(rename_source)
440 finally:
441 qa_utils.RemoveFromEtcHosts(["meeeeh-not-exists", rename_target])
442
443 info = GetInstanceInfo(rename_source)
444
445 # Check that instance volume tags are correctly updated. Note that this
446 # check is lvm specific, so we skip it for non-lvm-based instances.
447 # FIXME: This will need updating when instances are able to have
448 # different disks living on storage pools with heterogeneous storage types.
449 # FIXME: This check should be put inside the disk/storage classes themselves,
450 # rather than explicitly called here.
451 if info["storage-type"] == constants.ST_LVM_VG:
452 # In the lvm world we can check for tags on the logical volume
453 tags_cmd = ("lvs -o tags --noheadings %s | grep " %
454 (" ".join(info["volumes"]), ))
455 else:
456 # Other storage types don't have tags, so we use an always failing command,
457 # to make sure it never gets executed
458 tags_cmd = "false"
459
460 # and now rename instance to rename_target...
461 AssertCommand(["gnt-instance", "rename", rename_source, rename_target])
462 CheckSsconfInstanceList(rename_target)
463 qa_utils.RunInstanceCheck(rename_source, False)
464 qa_utils.RunInstanceCheck(rename_target, False)
465
466 # NOTE: tags might not be exactly the same as the instance name, due to
467 # charset restrictions; hence the test might be flaky
468 if (rename_source != rename_target and
469 info["storage-type"] == constants.ST_LVM_VG):
470 for node in info["nodes"]:
471 AssertCommand(tags_cmd + rename_source, node=node, fail=True)
472 AssertCommand(tags_cmd + rename_target, node=node, fail=False)
473
474 # and back
475 AssertCommand(["gnt-instance", "rename", rename_target, rename_source])
476 CheckSsconfInstanceList(rename_source)
477 qa_utils.RunInstanceCheck(rename_target, False)
478
479 if (rename_source != rename_target and
480 info["storage-type"] == constants.ST_LVM_VG):
481 for node in info["nodes"]:
482 AssertCommand(tags_cmd + rename_source, node=node, fail=False)
483 AssertCommand(tags_cmd + rename_target, node=node, fail=True)
484
485
486 @InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
487 def TestInstanceFailover(instance):
488 """gnt-instance failover"""
489 if not IsFailoverSupported(instance):
490 print qa_utils.FormatInfo("Instance doesn't support failover, skipping"
491 " test")
492 return
493
494 cmd = ["gnt-instance", "failover", "--force", instance.name]
495
496 # failover ...
497 AssertCommand(cmd)
498 qa_utils.RunInstanceCheck(instance, True)
499
500 # ... and back
501 AssertCommand(cmd)
502
503
504 @InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
505 def TestInstanceMigrate(instance, toggle_always_failover=True):
506 """gnt-instance migrate"""
507 if not IsMigrationSupported(instance):
508 print qa_utils.FormatInfo("Instance doesn't support migration, skipping"
509 " test")
510 return
511
512 cmd = ["gnt-instance", "migrate", "--force", instance.name]
513 af_par = constants.BE_ALWAYS_FAILOVER
514 af_field = "be/" + constants.BE_ALWAYS_FAILOVER
515 af_init_val = _GetBoolInstanceField(instance.name, af_field)
516
517 # migrate ...
518 AssertCommand(cmd)
519 # TODO: Verify the choice between failover and migration
520 qa_utils.RunInstanceCheck(instance, True)
521
522 # ... and back (possibly with always_failover toggled)
523 if toggle_always_failover:
524 AssertCommand(["gnt-instance", "modify", "-B",
525 ("%s=%s" % (af_par, not af_init_val)),
526 instance.name])
527 AssertCommand(cmd)
528 # TODO: Verify the choice between failover and migration
529 qa_utils.RunInstanceCheck(instance, True)
530 if toggle_always_failover:
531 AssertCommand(["gnt-instance", "modify", "-B",
532 ("%s=%s" % (af_par, af_init_val)), instance.name])
533
534 # TODO: Split into multiple tests
535 AssertCommand(["gnt-instance", "shutdown", instance.name])
536 qa_utils.RunInstanceCheck(instance, False)
537 AssertCommand(cmd, fail=True)
538 AssertCommand(["gnt-instance", "migrate", "--force", "--allow-failover",
539 instance.name])
540 AssertCommand(["gnt-instance", "start", instance.name])
541 AssertCommand(cmd)
542 # @InstanceCheck enforces the check that the instance is running
543 qa_utils.RunInstanceCheck(instance, True)
544
545 AssertCommand(["gnt-instance", "modify", "-B",
546 ("%s=%s" %
547 (constants.BE_ALWAYS_FAILOVER, constants.VALUE_TRUE)),
548 instance.name])
549
550 AssertCommand(cmd)
551 qa_utils.RunInstanceCheck(instance, True)
552 # TODO: Verify that a failover has been done instead of a migration
553
554 # TODO: Verify whether the default value is restored here (not hardcoded)
555 AssertCommand(["gnt-instance", "modify", "-B",
556 ("%s=%s" %
557 (constants.BE_ALWAYS_FAILOVER, constants.VALUE_FALSE)),
558 instance.name])
559
560 AssertCommand(cmd)
561 qa_utils.RunInstanceCheck(instance, True)
562
563
564 def TestInstanceInfo(instance):
565 """gnt-instance info"""
566 AssertCommand(["gnt-instance", "info", instance.name])
567
568
569 def _TestKVMHotplug(instance):
570 """Tests hotplug modification commands, noting that they may report failures in their output rather than via the exit code.
571
572 """
573 args_to_try = [
574 ["--net", "-1:add", "--hotplug"],
575 ["--net", "-1:modify,mac=aa:bb:cc:dd:ee:ff", "--hotplug", "--force"],
576 ["--net", "-1:remove", "--hotplug"],
577 ["--disk", "-1:add,size=1G", "--hotplug"],
578 ["--disk", "-1:remove", "--hotplug"],
579 ]
580 for alist in args_to_try:
581 _, stdout, stderr = \
582 AssertCommand(["gnt-instance", "modify"] + alist + [instance.name])
583 if "failed" in stdout or "failed" in stderr:
584 raise qa_error.Error("Hotplugging command failed; please check output"
585 " for further information")
586
587
588 @InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
589 def TestInstanceModify(instance):
590 """gnt-instance modify"""
591 default_hv = qa_config.GetDefaultHypervisor()
592
593 # Assume /sbin/init exists on all systems
594 test_kernel = "/sbin/init"
595 test_initrd = test_kernel
596
597 orig_maxmem = qa_config.get(constants.BE_MAXMEM)
598 orig_minmem = qa_config.get(constants.BE_MINMEM)
599 #orig_bridge = qa_config.get("bridge", "xen-br0")
600
601 args = [
602 ["-B", "%s=128" % constants.BE_MINMEM],
603 ["-B", "%s=128" % constants.BE_MAXMEM],
604 ["-B", "%s=%s,%s=%s" % (constants.BE_MINMEM, orig_minmem,
605 constants.BE_MAXMEM, orig_maxmem)],
606 ["-B", "%s=2" % constants.BE_VCPUS],
607 ["-B", "%s=1" % constants.BE_VCPUS],
608 ["-B", "%s=%s" % (constants.BE_VCPUS, constants.VALUE_DEFAULT)],
609 ["-B", "%s=%s" % (constants.BE_ALWAYS_FAILOVER, constants.VALUE_TRUE)],
610 ["-B", "%s=%s" % (constants.BE_ALWAYS_FAILOVER, constants.VALUE_DEFAULT)],
611
612 ["-H", "%s=%s" % (constants.HV_KERNEL_PATH, test_kernel)],
613 ["-H", "%s=%s" % (constants.HV_KERNEL_PATH, constants.VALUE_DEFAULT)],
614
615 # TODO: bridge tests
616 #["--bridge", "xen-br1"],
617 #["--bridge", orig_bridge],
618 ]
619
620 if default_hv == constants.HT_XEN_PVM:
621 args.extend([
622 ["-H", "%s=%s" % (constants.HV_INITRD_PATH, test_initrd)],
623 ["-H", "no_%s" % (constants.HV_INITRD_PATH, )],
624 ["-H", "%s=%s" % (constants.HV_INITRD_PATH, constants.VALUE_DEFAULT)],
625 ])
626 elif default_hv == constants.HT_XEN_HVM:
627 args.extend([
628 ["-H", "%s=acn" % constants.HV_BOOT_ORDER],
629 ["-H", "%s=%s" % (constants.HV_BOOT_ORDER, constants.VALUE_DEFAULT)],
630 ])
631 elif default_hv == constants.HT_KVM and \
632 qa_config.TestEnabled("instance-device-hotplug"):
633 _TestKVMHotplug(instance)
634
635 url = "http://example.com/busybox.img"
636 args.extend([
637 ["--os-parameters", "os-image=" + url],
638 ["--os-parameters", "os-image=default"]
639 ])
640
641 for alist in args:
642 AssertCommand(["gnt-instance", "modify"] + alist + [instance.name])
643
644 # check no-modify
645 AssertCommand(["gnt-instance", "modify", instance.name], fail=True)
646
647 # Marking offline while instance is running must fail...
648 AssertCommand(["gnt-instance", "modify", "--offline", instance.name],
649 fail=True)
650
651 # ...while making it online fails too (needs to be offline first)
652 AssertCommand(["gnt-instance", "modify", "--online", instance.name],
653 fail=True)
654
655
656 @InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
657 def TestInstanceModifyPrimaryAndBack(instance, currentnode, othernode):
658 """gnt-instance modify --new-primary
659
660 This will leave the instance on its original primary node, not the other node.
661
662 """
663 if instance.disk_template != constants.DT_FILE:
664 print qa_utils.FormatInfo("Test only supported for the file disk template")
665 return
666
667 cluster_name = qa_config.get("name")
668
669 name = instance.name
670 current = currentnode.primary
671 other = othernode.primary
672
673 filestorage = qa_config.get("file-storage-dir",
674 pathutils.DEFAULT_FILE_STORAGE_DIR)
675 disk = os.path.join(filestorage, name)
676
677 AssertCommand(["gnt-instance", "modify", "--new-primary=%s" % other, name],
678 fail=True)
679 AssertCommand(["gnt-instance", "shutdown", name])
680 AssertCommand(["scp", "-oGlobalKnownHostsFile=%s" %
681 pathutils.SSH_KNOWN_HOSTS_FILE,
682 "-oCheckHostIp=no", "-oStrictHostKeyChecking=yes",
683 "-oHashKnownHosts=no", "-oHostKeyAlias=%s" % cluster_name,
684 "-r", disk, "%s:%s" % (other, filestorage)], node=current)
685 AssertCommand(["gnt-instance", "modify", "--new-primary=%s" % other, name])
686 AssertCommand(["gnt-instance", "startup", name])
687
688 # and back
689 AssertCommand(["gnt-instance", "shutdown", name])
690 AssertCommand(["rm", "-rf", disk], node=other)
691 AssertCommand(["gnt-instance", "modify", "--new-primary=%s" % current, name])
692 AssertCommand(["gnt-instance", "startup", name])
693
694
695 @InstanceCheck(INST_DOWN, INST_DOWN, FIRST_ARG)
696 def TestInstanceStoppedModify(instance):
697 """gnt-instance modify (stopped instance)"""
698 name = instance.name
699
700 # Instance was not marked offline; try marking it online once more
701 AssertCommand(["gnt-instance", "modify", "--online", name])
702
703 # Mark instance as offline
704 AssertCommand(["gnt-instance", "modify", "--offline", name])
705
706 # When the instance is offline shutdown should only work with --force,
707 # while start should never work
708 AssertCommand(["gnt-instance", "shutdown", name], fail=True)
709 AssertCommand(["gnt-instance", "shutdown", "--force", name])
710 AssertCommand(["gnt-instance", "start", name], fail=True)
711 AssertCommand(["gnt-instance", "start", "--force", name], fail=True)
712
713 # Also do offline to offline
714 AssertCommand(["gnt-instance", "modify", "--offline", name])
715
716 # And online again
717 AssertCommand(["gnt-instance", "modify", "--online", name])
718
719
720 @InstanceCheck(INST_DOWN, INST_DOWN, FIRST_ARG)
721 def TestInstanceConvertDiskToPlain(instance, inodes):
722 """gnt-instance modify -t"""
723 name = instance.name
724
725 template = instance.disk_template
726 if template != constants.DT_DRBD8:
727 print qa_utils.FormatInfo("Unsupported template %s, skipping conversion"
728 " test" % template)
729 return
730
731 assert len(inodes) == 2
732 AssertCommand(["gnt-instance", "modify", "-t", constants.DT_PLAIN, name])
733 AssertCommand(["gnt-instance", "modify", "-t", constants.DT_DRBD8,
734 "-n", inodes[1].primary, name])
735
736
737 @InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
738 def TestInstanceModifyDisks(instance):
739 """gnt-instance modify --disk"""
740 if not IsDiskSupported(instance):
741 print qa_utils.FormatInfo("Instance doesn't support disks, skipping test")
742 return
743
744 disk_conf = qa_config.GetDiskOptions()[-1]
745 size = disk_conf.get("size")
746 name = instance.name
747 build_cmd = lambda arg: ["gnt-instance", "modify", "--disk", arg, name]
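# For example, build_cmd("add:size=1G") produces
# ["gnt-instance", "modify", "--disk", "add:size=1G", <instance name>].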
748 if qa_config.AreSpindlesSupported():
749 spindles = disk_conf.get("spindles")
750 spindles_supported = True
751 else:
752 # Any number is good for spindles in this case
753 spindles = 1
754 spindles_supported = False
755 AssertCommand(build_cmd("add:size=%s,spindles=%s" % (size, spindles)),
756 fail=not spindles_supported)
757 AssertCommand(build_cmd("add:size=%s" % size),
758 fail=spindles_supported)
759 # Exactly one of the above commands has succeeded, so we need one remove
760 AssertCommand(build_cmd("remove"))
761
762
763 @InstanceCheck(INST_DOWN, INST_DOWN, FIRST_ARG)
764 def TestInstanceGrowDisk(instance):
765 """gnt-instance grow-disk"""
766 if instance.disk_template == constants.DT_DISKLESS:
767 print qa_utils.FormatInfo("Test not supported for diskless instances")
768 return
769
770 name = instance.name
771 disks = qa_config.GetDiskOptions()
772 all_size = [d.get("size") for d in disks]
773 all_grow = [d.get("growth") for d in disks]
774
775 if not all_grow:
776 # growth amounts are missing, but the grow-disk test has been enabled,
777 # so let's set a fixed/nominal growth
778 all_grow = ["128M" for _ in all_size]
779
780 for idx, (size, grow) in enumerate(zip(all_size, all_grow)):
781 # succeed in grow by amount
782 AssertCommand(["gnt-instance", "grow-disk", name, str(idx), grow])
783 # fail in grow to the old size
784 AssertCommand(["gnt-instance", "grow-disk", "--absolute", name, str(idx),
785 size], fail=True)
786 # succeed to grow to old size + 2 * growth
787 int_size = utils.ParseUnit(size)
788 int_grow = utils.ParseUnit(grow)
789 AssertCommand(["gnt-instance", "grow-disk", "--absolute", name, str(idx),
790 str(int_size + 2 * int_grow)])
791
792
793 @InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
794 def TestInstanceDeviceNames(instance):
795 if instance.disk_template == constants.DT_DISKLESS:
796 print qa_utils.FormatInfo("Test not supported for diskless instances")
797 return
798
799 name = instance.name
800 for dev_type in ["disk", "net"]:
801 if dev_type == "disk":
802 options = ",size=512M"
803 if qa_config.AreSpindlesSupported():
804 options += ",spindles=1"
805 else:
806 options = ""
807 # succeed in adding a device named 'test_device'
808 AssertCommand(["gnt-instance", "modify",
809 "--%s=-1:add,name=test_device%s" % (dev_type, options),
810 name])
811 # succeed in removing the 'test_device'
812 AssertCommand(["gnt-instance", "modify",
813 "--%s=test_device:remove" % dev_type,
814 name])
815 # fail to add two devices with the same name
816 AssertCommand(["gnt-instance", "modify",
817 "--%s=-1:add,name=test_device%s" % (dev_type, options),
818 "--%s=-1:add,name=test_device%s" % (dev_type, options),
819 name], fail=True)
820 # fail to add a device with invalid name
821 AssertCommand(["gnt-instance", "modify",
822 "--%s=-1:add,name=2%s" % (dev_type, options),
823 name], fail=True)
824 # Rename disks
825 disks = qa_config.GetDiskOptions()
826 disk_names = [d.get("name") for d in disks]
827 for idx, disk_name in enumerate(disk_names):
828 # Refer to disk by idx
829 AssertCommand(["gnt-instance", "modify",
830 "--disk=%s:modify,name=renamed" % idx,
831 name])
832 # Refer to by name and rename to original name
833 AssertCommand(["gnt-instance", "modify",
834 "--disk=renamed:modify,name=%s" % disk_name,
835 name])
836 if len(disks) >= 2:
837 # fail when renaming two disks to the same name
838 AssertCommand(["gnt-instance", "modify",
839 "--disk=0:modify,name=same_name",
840 "--disk=1:modify,name=same_name",
841 name], fail=True)
842
843
844 def TestInstanceList():
845 """gnt-instance list"""
846 qa_utils.GenericQueryTest("gnt-instance", query.INSTANCE_FIELDS.keys())
847
848
849 def TestInstanceListFields():
850 """gnt-instance list-fields"""
851 qa_utils.GenericQueryFieldsTest("gnt-instance", query.INSTANCE_FIELDS.keys())
852
853
854 @InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
855 def TestInstanceConsole(instance):
856 """gnt-instance console"""
857 AssertCommand(["gnt-instance", "console", "--show-cmd", instance.name])
858
859
860 @InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
861 def TestReplaceDisks(instance, curr_nodes, other_nodes):
862 """gnt-instance replace-disks"""
863 def buildcmd(args):
864 cmd = ["gnt-instance", "replace-disks"]
865 cmd.extend(args)
866 cmd.append(instance.name)
867 return cmd
868
869 if not IsDiskReplacingSupported(instance):
870 print qa_utils.FormatInfo("Instance doesn't support disk replacing,"
871 " skipping test")
872 return
873
874 # Currently all supported templates have one primary and one secondary node
875 assert len(curr_nodes) == 2
876 snode = curr_nodes[1]
877 assert len(other_nodes) == 1
878 othernode = other_nodes[0]
879
880 options = qa_config.get("options", {})
881 use_ialloc = options.get("use-iallocators", True)
882 for data in [
883 ["-p"],
884 ["-s"],
885 # A placeholder; the actual command choice depends on use_ialloc
886 None,
887 # Restore the original secondary
888 ["--new-secondary=%s" % snode.primary],
889 ]:
890 if data is None:
891 if use_ialloc:
892 data = ["-I", constants.DEFAULT_IALLOCATOR_SHORTCUT]
893 else:
894 data = ["--new-secondary=%s" % othernode.primary]
895 AssertCommand(buildcmd(data))
896
897 AssertCommand(buildcmd(["-a"]))
898 AssertCommand(["gnt-instance", "stop", instance.name])
899 AssertCommand(buildcmd(["-a"]), fail=True)
900 AssertCommand(["gnt-instance", "activate-disks", instance.name])
901 AssertCommand(["gnt-instance", "activate-disks", "--wait-for-sync",
902 instance.name])
903 AssertCommand(buildcmd(["-a"]))
904 AssertCommand(["gnt-instance", "start", instance.name])
905
906
907 def _AssertRecreateDisks(cmdargs, instance, fail=False, check=True,
908 destroy=True):
909 """Execute gnt-instance recreate-disks and check the result
910
911 @param cmdargs: Arguments (instance name excluded)
912 @param instance: Instance to operate on
913 @param fail: True if the command is expected to fail
914 @param check: If True and fail is False, check that the disks work
915 @param destroy: If True, destroy the old disks first
916
917 """
918 if destroy:
919 _DestroyInstanceDisks(instance)
920 AssertCommand((["gnt-instance", "recreate-disks"] + cmdargs +
921 [instance.name]), fail)
922 if not fail and check:
923 # Quick check that the disks are there
924 AssertCommand(["gnt-instance", "activate-disks", instance.name])
925 AssertCommand(["gnt-instance", "activate-disks", "--wait-for-sync",
926 instance.name])
927 AssertCommand(["gnt-instance", "deactivate-disks", instance.name])
928
929
930 def _BuildRecreateDisksOpts(en_disks, with_spindles, with_growth,
931 spindles_supported):
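# Builds "--disk=<idx>:size=<size>[,spindles=<n>]" options for recreate-disks
# from the enumerate()d disk configurations, e.g. (hypothetical values)
# "--disk=0:size=1G,spindles=2".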
932 if with_spindles:
933 if spindles_supported:
934 if with_growth:
935 build_spindles_opt = (lambda disk:
936 ",spindles=%s" %
937 (disk["spindles"] + disk["spindles-growth"]))
938 else:
939 build_spindles_opt = (lambda disk:
940 ",spindles=%s" % disk["spindles"])
941 else:
942 build_spindles_opt = (lambda _: ",spindles=1")
943 else:
944 build_spindles_opt = (lambda _: "")
945 if with_growth:
946 build_size_opt = (lambda disk:
947 "size=%s" % (utils.ParseUnit(disk["size"]) +
948 utils.ParseUnit(disk["growth"])))
949 else:
950 build_size_opt = (lambda disk: "size=%s" % disk["size"])
951 build_disk_opt = (lambda (idx, disk):
952 "--disk=%s:%s%s" % (idx, build_size_opt(disk),
953 build_spindles_opt(disk)))
954 return map(build_disk_opt, en_disks)
955
956
957 @InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
958 def TestRecreateDisks(instance, inodes, othernodes):
959 """gnt-instance recreate-disks
960
961 @param instance: Instance to work on
962 @param inodes: List of the current nodes of the instance
963 @param othernodes: list/tuple of nodes where to temporarily recreate disks
964
965 """
966 options = qa_config.get("options", {})
967 use_ialloc = options.get("use-iallocators", True)
968 other_seq = ":".join([n.primary for n in othernodes])
969 orig_seq = ":".join([n.primary for n in inodes])
970 # These fail because the instance is running
971 _AssertRecreateDisks(["-n", other_seq], instance, fail=True, destroy=False)
972 if use_ialloc:
973 _AssertRecreateDisks(["-I", "hail"], instance, fail=True, destroy=False)
974 else:
975 _AssertRecreateDisks(["-n", other_seq], instance, fail=True, destroy=False)
976 AssertCommand(["gnt-instance", "stop", instance.name])
977 # Disks exist: this should fail
978 _AssertRecreateDisks([], instance, fail=True, destroy=False)
979 # Unsupported spindles parameters: fail
980 if not qa_config.AreSpindlesSupported():
981 _AssertRecreateDisks(["--disk=0:spindles=2"], instance,
982 fail=True, destroy=False)
983 # Recreate disks in place
984 _AssertRecreateDisks([], instance)
985 # Move disks away
986 if use_ialloc:
987 _AssertRecreateDisks(["-I", "hail"], instance)
988 # Move disks somewhere else
989 _AssertRecreateDisks(["-I", constants.DEFAULT_IALLOCATOR_SHORTCUT],
990 instance)
991 else:
992 _AssertRecreateDisks(["-n", other_seq], instance)
993 # Move disks back
994 _AssertRecreateDisks(["-n", orig_seq], instance)
995 # Recreate resized disks
996 # One of the two commands fails because either spindles are given when they
997 # should not or vice versa
998 alldisks = qa_config.GetDiskOptions()
999 spindles_supported = qa_config.AreSpindlesSupported()
1000 disk_opts = _BuildRecreateDisksOpts(enumerate(alldisks), True, True,
1001 spindles_supported)
1002 _AssertRecreateDisks(disk_opts, instance, destroy=True,
1003 fail=not spindles_supported)
1004 disk_opts = _BuildRecreateDisksOpts(enumerate(alldisks), False, True,
1005 spindles_supported)
1006 _AssertRecreateDisks(disk_opts, instance, destroy=False,
1007 fail=spindles_supported)
1008 # Recreate the disks one by one (with the original size)
1009 for (idx, disk) in enumerate(alldisks):
1010 # Only the first call should destroy all the disks
1011 destroy = (idx == 0)
1012 # Again, one of the two commands is expected to fail
1013 disk_opts = _BuildRecreateDisksOpts([(idx, disk)], True, False,
1014 spindles_supported)
1015 _AssertRecreateDisks(disk_opts, instance, destroy=destroy, check=False,
1016 fail=not spindles_supported)
1017 disk_opts = _BuildRecreateDisksOpts([(idx, disk)], False, False,
1018 spindles_supported)
1019 _AssertRecreateDisks(disk_opts, instance, destroy=False, check=False,
1020 fail=spindles_supported)
1021 # This and InstanceCheck decoration check that the disks are working
1022 AssertCommand(["gnt-instance", "reinstall", "-f", instance.name])
1023 AssertCommand(["gnt-instance", "start", instance.name])
1024
1025
1026 @InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
1027 def TestInstanceExport(instance, node):
1028 """gnt-backup export -n ..."""
1029 name = instance.name
1030 # Export does not work for file-based templates, thus we skip the test
1031 if instance.disk_template in [constants.DT_FILE, constants.DT_SHARED_FILE]:
1032 return
1033 AssertCommand(["gnt-backup", "export", "-n", node.primary, name])
1034 return qa_utils.ResolveInstanceName(name)
1035
1036
1037 @InstanceCheck(None, INST_DOWN, FIRST_ARG)
1038 def TestInstanceExportWithRemove(instance, node):
1039 """gnt-backup export --remove-instance"""
1040 AssertCommand(["gnt-backup", "export", "-n", node.primary,
1041 "--remove-instance", instance.name])
1042
1043
1044 @InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
1045 def TestInstanceExportNoTarget(instance):
1046 """gnt-backup export (without target node, should fail)"""
1047 AssertCommand(["gnt-backup", "export", instance.name], fail=True)
1048
1049
1050 @InstanceCheck(None, INST_DOWN, FIRST_ARG)
1051 def TestInstanceImport(newinst, node, expnode, name):
1052 """gnt-backup import"""
1053 templ = constants.DT_PLAIN
1054 if not qa_config.IsTemplateSupported(templ):
1055 return
1056 cmd = (["gnt-backup", "import",
1057 "--disk-template=%s" % templ,
1058 "--no-ip-check",
1059 "--src-node=%s" % expnode.primary,
1060 "--src-dir=%s/%s" % (pathutils.EXPORT_DIR, name),
1061 "--node=%s" % node.primary] +
1062 GetGenericAddParameters(newinst, templ,
1063 force_mac=constants.VALUE_GENERATE))
1064 cmd.append(newinst.name)
1065 AssertCommand(cmd)
1066 newinst.SetDiskTemplate(templ)
1067
1068
1069 def TestBackupList(expnode):
1070 """gnt-backup list"""
1071 AssertCommand(["gnt-backup", "list", "--node=%s" % expnode.primary])
1072
1073 qa_utils.GenericQueryTest("gnt-backup", query.EXPORT_FIELDS.keys(),
1074 namefield=None, test_unknown=False)
1075
1076
1077 def TestBackupListFields():
1078 """gnt-backup list-fields"""
1079 qa_utils.GenericQueryFieldsTest("gnt-backup", query.EXPORT_FIELDS.keys())
1080
1081
1082 def TestRemoveInstanceOfflineNode(instance, snode, set_offline, set_online):
1083 """gnt-instance remove with an off-line node
1084
1085 @param instance: instance
1086 @param snode: secondary node, to be set offline
1087 @param set_offline: function to call to set the node off-line
1088 @param set_online: function to call to set the node on-line
1089
1090 """
1091 info = GetInstanceInfo(instance.name)
1092 set_offline(snode)
1093 try:
1094 TestInstanceRemove(instance)
1095 finally:
1096 set_online(snode)
1097
1098 # Clean up the disks on the offline node, if necessary
1099 if instance.disk_template not in constants.DTS_EXT_MIRROR:
1100 # FIXME: abstract the cleanup inside the disks
1101 if info["storage-type"] == constants.ST_LVM_VG:
1102 for minor in info["drbd-minors"][snode.primary]:
1103 # DRBD 8.3 syntax comes first, then DRBD 8.4 syntax. The 8.4 syntax
1104 # relies on the fact that we always create a resource for each minor,
1105 # and that this resource is always named resource{minor}.
1106 # As 'drbdsetup 0 down' does return success (even though that's invalid
1107 # syntax), we always have to perform both commands and ignore the
1108 # output.
1109 drbd_shutdown_cmd = \
1110 "(drbdsetup %d down >/dev/null 2>&1;" \
1111 " drbdsetup down resource%d >/dev/null 2>&1) || /bin/true" % \
1112 (minor, minor)
1113 AssertCommand(drbd_shutdown_cmd, node=snode)
1114 AssertCommand(["lvremove", "-f"] + info["volumes"], node=snode)
1115 elif info["storage-type"] == constants.ST_FILE:
1116 filestorage = qa_config.get("file-storage-dir",
1117 pathutils.DEFAULT_FILE_STORAGE_DIR)
1118 disk = os.path.join(filestorage, instance.name)
1119 AssertCommand(["rm", "-rf", disk], node=snode)
1120
1121
1122 def TestInstanceCreationRestrictedByDiskTemplates():
1123 """Test adding instances for disabled disk templates."""
1124 if qa_config.TestEnabled("cluster-exclusive-storage"):
1125 # These tests are valid only for non-exclusive storage
1126 return
1127
1128 enabled_disk_templates = qa_config.GetEnabledDiskTemplates()
1129 nodes = qa_config.AcquireManyNodes(2)
1130
1131 # Setup the cluster with the enabled_disk_templates
1132 AssertCommand(
1133 ["gnt-cluster", "modify",
1134 "--enabled-disk-templates=%s" % ",".join(enabled_disk_templates),
1135 "--ipolicy-disk-templates=%s" % ",".join(enabled_disk_templates)],
1136 fail=False)
1137
1138 # Test instance creation for enabled disk templates
1139 for disk_template in enabled_disk_templates:
1140 instance = CreateInstanceByDiskTemplate(nodes, disk_template, fail=False)
1141 TestInstanceRemove(instance)
1142 instance.Release()
1143
1144 # Test that instance creation fails for disabled disk templates
1145 disabled_disk_templates = list(constants.DISK_TEMPLATES
1146 - set(enabled_disk_templates))
1147 for disk_template in disabled_disk_templates:
1148 instance = CreateInstanceByDiskTemplate(nodes, disk_template, fail=True)
1149
1150 # Test instance creation after disabling some of the enabled disk templates
1151 if (len(enabled_disk_templates) > 1):
1152 # Partition the disk templates, enable them separately and check if the
1153 # disabled ones cannot be used by instances.
1154 middle = len(enabled_disk_templates) / 2
1155 templates1 = enabled_disk_templates[:middle]
1156 templates2 = enabled_disk_templates[middle:]
1157
1158 for (enabled, disabled) in [(templates1, templates2),
1159 (templates2, templates1)]:
1160 AssertCommand(["gnt-cluster", "modify",
1161 "--enabled-disk-templates=%s" % ",".join(enabled),
1162 "--ipolicy-disk-templates=%s" % ",".join(enabled)],
1163 fail=False)
1164 for disk_template in disabled:
1165 CreateInstanceByDiskTemplate(nodes, disk_template, fail=True)
1166 elif (len(enabled_disk_templates) == 1):
1167 # If only one disk template is enabled in the QA config, we have to enable
1168 # some other templates in order to test whether disabling the only enabled
1169 # disk template prohibits creating instances of that template.
1170 other_disk_templates = list(
1171 set([constants.DT_DISKLESS, constants.DT_BLOCK]) -
1172 set(enabled_disk_templates))
1173 AssertCommand(["gnt-cluster", "modify",
1174 "--enabled-disk-templates=%s" %
1175 ",".join(other_disk_templates),
1176 "--ipolicy-disk-templates=%s" %
1177 ",".join(other_disk_templates)],
1178 fail=False)
1179 CreateInstanceByDiskTemplate(nodes, enabled_disk_templates[0], fail=True)
1180 else:
1181 raise qa_error.Error("Please enable at least one disk template"
1182 " in your QA setup.")
1183
1184 # Restore initially enabled disk templates
1185 AssertCommand(["gnt-cluster", "modify",
1186 "--enabled-disk-templates=%s" %
1187 ",".join(enabled_disk_templates),
1188 "--ipolicy-disk-templates=%s" %
1189 ",".join(enabled_disk_templates)],
1190 fail=False)
1191
1192
1193 def _AssertInstance(instance, status, admin_state, admin_state_source):
1194 x, y, z = \
1195 _GetInstanceFields(instance.name,
1196 ["status", "admin_state", "admin_state_source"])
1197
1198 AssertEqual(x, status)
1199 AssertEqual(y, admin_state)
1200 AssertEqual(z, admin_state_source)
1201
1202
1203 @InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
1204 def _TestInstanceUserDown(instance, hv_shutdown_fn):
1205 """Test different combinations of user shutdown"""
1206
1207 # 1. User shutdown
1208 # 2. Instance start
1209 hv_shutdown_fn()
1210
1211 _AssertInstance(instance,
1212 constants.INSTST_USERDOWN,
1213 constants.ADMINST_UP,
1214 constants.ADMIN_SOURCE)
1215
1216 AssertCommand(["gnt-instance", "start", instance.name])
1217
1218 _AssertInstance(instance,
1219 constants.INSTST_RUNNING,
1220 constants.ADMINST_UP,
1221 constants.ADMIN_SOURCE)
1222
1223 # 1. User shutdown
1224 # 2. Watcher cleanup
1225 # 3. Instance start
1226 hv_shutdown_fn()
1227
1228 _AssertInstance(instance,
1229 constants.INSTST_USERDOWN,
1230 constants.ADMINST_UP,
1231 constants.ADMIN_SOURCE)
1232
1233 qa_daemon.RunWatcherDaemon()
1234
1235 _AssertInstance(instance,
1236 constants.INSTST_USERDOWN,
1237 constants.ADMINST_DOWN,
1238 constants.USER_SOURCE)
1239
1240 AssertCommand(["gnt-instance", "start", instance.name])
1241
1242 _AssertInstance(instance,
1243 constants.INSTST_RUNNING,
1244 constants.ADMINST_UP,
1245 constants.ADMIN_SOURCE)
1246
1247 # 1. User shutdown
1248 # 2. Watcher cleanup
1249 # 3. Instance stop
1250 # 4. Instance start
1251 hv_shutdown_fn()
1252
1253 _AssertInstance(instance,
1254 constants.INSTST_USERDOWN,
1255 constants.ADMINST_UP,
1256 constants.ADMIN_SOURCE)
1257
1258 qa_daemon.RunWatcherDaemon()
1259
1260 _AssertInstance(instance,
1261 constants.INSTST_USERDOWN,
1262 constants.ADMINST_DOWN,
1263 constants.USER_SOURCE)
1264
1265 AssertCommand(["gnt-instance", "shutdown", instance.name])
1266
1267 _AssertInstance(instance,
1268 constants.INSTST_ADMINDOWN,
1269 constants.ADMINST_DOWN,
1270 constants.ADMIN_SOURCE)
1271
1272 AssertCommand(["gnt-instance", "start", instance.name])
1273
1274 _AssertInstance(instance,
1275 constants.INSTST_RUNNING,
1276 constants.ADMINST_UP,
1277 constants.ADMIN_SOURCE)
1278
1279 # 1. User shutdown
1280 # 2. Instance stop
1281 # 3. Instance start
1282 hv_shutdown_fn()
1283
1284 _AssertInstance(instance,
1285 constants.INSTST_USERDOWN,
1286 constants.ADMINST_UP,
1287 constants.ADMIN_SOURCE)
1288
1289 AssertCommand(["gnt-instance", "shutdown", instance.name])
1290
1291 _AssertInstance(instance,
1292 constants.INSTST_ADMINDOWN,
1293 constants.ADMINST_DOWN,
1294 constants.ADMIN_SOURCE)
1295
1296 AssertCommand(["gnt-instance", "start", instance.name])
1297
1298 _AssertInstance(instance,
1299 constants.INSTST_RUNNING,
1300 constants.ADMINST_UP,
1301 constants.ADMIN_SOURCE)
1302
1303
1304 @InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
1305 def _TestInstanceUserDownXen(instance):
1306 primary = _GetInstanceField(instance.name, "pnode")
1307 fn = lambda: AssertCommand(["xm", "shutdown", "-w", instance.name],
1308 node=primary)
1309
1310 AssertCommand(["gnt-cluster", "modify", "--user-shutdown=true"])
1311 _TestInstanceUserDown(instance, fn)
1312 AssertCommand(["gnt-cluster", "modify", "--user-shutdown=false"])
1313
1314
1315 @InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
1316 def _TestInstanceUserDownKvm(instance):
1317 def _StopKVMInstance():
1318 AssertCommand("pkill -f \"\\-name %s\"" % instance.name, node=primary)
1319 time.sleep(10)
1320
1321 AssertCommand(["gnt-cluster", "modify", "--user-shutdown=true"])
1322 AssertCommand(["gnt-instance", "modify", "-H", "user_shutdown=true",
1323 instance.name])
1324
1325 # The instance needs to reboot not because the 'user_shutdown'
1326 # parameter was modified but because the KVM daemon needs to be
1327 # started, given that the instance was first created with user
1328 # shutdown disabled.
1329 AssertCommand(["gnt-instance", "reboot", instance.name])
1330
1331 primary = _GetInstanceField(instance.name, "pnode")
1332 _TestInstanceUserDown(instance, _StopKVMInstance)
1333
1334 AssertCommand(["gnt-instance", "modify", "-H", "user_shutdown=false",
1335 instance.name])
1336 AssertCommand(["gnt-cluster", "modify", "--user-shutdown=false"])
1337
1338
1339 def TestInstanceUserDown(instance):
1340 """Tests user shutdown"""
1341 enabled_hypervisors = qa_config.GetEnabledHypervisors()
1342
1343 for (hv, fn) in [(constants.HT_XEN_PVM, _TestInstanceUserDownXen),
1344 (constants.HT_XEN_HVM, _TestInstanceUserDownXen),
1345 (constants.HT_KVM, _TestInstanceUserDownKvm)]:
1346 if hv in enabled_hypervisors:
1347 qa_daemon.TestPauseWatcher()
1348 fn(instance)
1349 qa_daemon.TestResumeWatcher()
1350 else:
1351 print "%s hypervisor is not enabled, skipping test for this hypervisor" \
1352 % hv
1353
1354
1355 @InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
1356 def TestInstanceCommunication(instance, master):
1357 """Tests instance communication via 'gnt-instance modify'"""
1358
1359 # Enable instance communication network at the cluster level
1360 network_name = "mynetwork"
1361
1362 cmd = ["gnt-cluster", "modify",
1363 "--instance-communication-network=%s" % network_name]
1364 result_output = qa_utils.GetCommandOutput(master.primary,
1365 utils.ShellQuoteArgs(cmd))
1366 print result_output
1367
1368 # Enable instance communication mechanism for this instance
1369 AssertCommand(["gnt-instance", "modify", "-c", "yes", instance.name])
1370
1371 # Reboot instance for changes to NIC to take effect
1372 AssertCommand(["gnt-instance", "reboot", instance.name])
1373
1374 # Check if the instance is properly configured for instance
1375 # communication.
1376 nic_name = "%s%s" % (constants.INSTANCE_COMMUNICATION_NIC_PREFIX,
1377 instance.name)
1378
1379 ## Check the output of 'gnt-instance list'
1380 nic_names = _GetInstanceField(instance.name, "nic.names")
1381 nic_names = map(lambda x: x.strip(" '"), nic_names.strip("[]").split(","))
1382
1383 AssertIn(nic_name, nic_names,
1384 msg="Looking for instance communication TAP interface")
1385
1386 nic_n = nic_names.index(nic_name)
1387
1388 nic_ip = _GetInstanceField(instance.name, "nic.ip/%d" % nic_n)
1389 nic_network = _GetInstanceField(instance.name, "nic.network.name/%d" % nic_n)
1390 nic_mode = _GetInstanceField(instance.name, "nic.mode/%d" % nic_n)
1391
1392 AssertEqual(IP4Address.InNetwork(constants.INSTANCE_COMMUNICATION_NETWORK4,
1393 nic_ip),
1394 True,
1395 msg="Checking if NIC's IP is part of the expected network")
1396
1397 AssertEqual(network_name, nic_network,
1398 msg="Checking if NIC's network name matches the expected value")
1399
1400 AssertEqual(constants.INSTANCE_COMMUNICATION_NETWORK_MODE, nic_mode,
1401 msg="Checking if NIC's mode name matches the expected value")
1402
1403 ## Check the output of 'ip route'
1404 cmd = ["ip", "route", "show", nic_ip]
1405 result_output = qa_utils.GetCommandOutput(master.primary,
1406 utils.ShellQuoteArgs(cmd))
1407 result = result_output.split()
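# The route entry is expected to have the shape
# "<nic_ip> dev <tap interface> scope link", i.e. five whitespace-separated
# tokens, which the assertions below check one by one.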
1408
1409 AssertEqual(len(result), 5, msg="Checking if the IP route is established")
1410
1411 route_ip = result[0]
1412 route_dev = result[1]
1413 route_tap = result[2]
1414 route_scope = result[3]
1415 route_link = result[4]
1416
1417 AssertEqual(route_ip, nic_ip,
1418 msg="Checking if IP route shows the expected IP")
1419 AssertEqual(route_dev, "dev",
1420 msg="Checking if IP route shows the expected device")
1421 AssertEqual(route_scope, "scope",
1422 msg="Checking if IP route shows the expected scope")
1423 AssertEqual(route_link, "link",
1424 msg="Checking if IP route shows the expected link-level scope")
1425
1426 ## Check the output of 'ip address'
1427 cmd = ["ip", "address", "show", "dev", route_tap]
1428 result_output = qa_utils.GetCommandOutput(master.primary,
1429 utils.ShellQuoteArgs(cmd))
1430 result = result_output.splitlines()
1431
1432 AssertEqual(len(result), 3,
1433 msg="Checking if the IP address is established")
1434
1435 result = result.pop().split()
1436
1437 AssertEqual(len(result), 7,
1438 msg="Checking if the IP address has the expected value")
1439
1440 address_ip = result[1]
1441 address_netmask = result[3]
1442
1443 AssertEqual(address_ip, "169.254.169.254/32",
1444 msg="Checking if the TAP interface has the expected IP")
1445 AssertEqual(address_netmask, "169.254.255.255",
1446 msg="Checking if the TAP interface has the expected netmask")
1447
1448 # Disable instance communication mechanism for this instance
1449 AssertCommand(["gnt-instance", "modify", "-c", "no", instance.name])
1450
1451 # Reboot instance for changes to NIC to take effect
1452 AssertCommand(["gnt-instance", "reboot", instance.name])
1453
1454 # Disable instance communication network at cluster level
1455 cmd = ["gnt-cluster", "modify",
1456 "--instance-communication-network=%s" % network_name]
1457 result_output = qa_utils.GetCommandOutput(master.primary,
1458 utils.ShellQuoteArgs(cmd))
1459 print result_output
1460
1461
1462 available_instance_tests = [
1463 ("instance-add-plain-disk", constants.DT_PLAIN,
1464 TestInstanceAddWithPlainDisk, 1),
1465 ("instance-add-drbd-disk", constants.DT_DRBD8,
1466 TestInstanceAddWithDrbdDisk, 2),
1467 ("instance-add-diskless", constants.DT_DISKLESS,
1468 TestInstanceAddDiskless, 1),
1469 ("instance-add-file", constants.DT_FILE,
1470 TestInstanceAddFile, 1),
1471 ("instance-add-shared-file", constants.DT_SHARED_FILE,
1472 TestInstanceAddSharedFile, 1),
1473 ]