Merge branch 'devel-2.7'
author Bernardo Dal Seno <bdalseno@google.com>
Tue, 12 Mar 2013 11:07:11 +0000 (12:07 +0100)
committer Bernardo Dal Seno <bdalseno@google.com>
Tue, 12 Mar 2013 11:32:01 +0000 (12:32 +0100)
* devel-2.7
  Add QA for instance creation with policy violation
  Add QA for policy-instance interactions
  Add QA for cluster policies
  Unit tests for objects.InstancePolicy + a fix
  Unit tests for objects.FillIPolicy() + small fix
  Fix upgrade of policy in objects.Cluster
  Fix instance policy checks for default back-end parameters
  Fix restoring default instance specs in group policies
  Unit tests for cmdlib._GetUpdatedIPolicy()
  Fix policy check for disk templates
  Fix merge 8e09e801 that resulted in duplicated code
  GanetiRapiClient: fix the no_remember option

Conflicts:
qa/qa_cluster.py
qa/qa_instance.py

Conflicts are due to QA config in master using objects instead of
dictionaries. Also updated some new QA code in devel for the same reason.

Signed-off-by: Bernardo Dal Seno <bdalseno@google.com>
Reviewed-by: Guido Trotter <ultrotter@google.com>

1  2 
lib/cmdlib.py
lib/constants.py
lib/objects.py
lib/rapi/client.py
qa/ganeti-qa.py
qa/qa-sample.json
qa/qa_cluster.py
qa/qa_group.py
qa/qa_instance.py
test/py/ganeti.objects_unittest.py

diff --cc lib/cmdlib.py
Simple merge
Simple merge
diff --cc lib/objects.py
Simple merge
Simple merge
diff --cc qa/ganeti-qa.py
@@@ -496,20 -473,72 +498,75 @@@ def RunExclusiveStorageTests()
        try:
          qa_cluster.TestSetExclStorCluster(False)
          instance = qa_instance.TestInstanceAddWithDrbdDisk([node, snode])
 -        qa_cluster.TestSetExclStorCluster(True)
 -        exp_err = [constants.CV_EINSTANCEUNSUITABLENODE]
 -        qa_cluster.AssertClusterVerify(fail=True, errors=exp_err)
 -        qa_instance.TestInstanceRemove(instance)
 +        try:
 +          qa_cluster.TestSetExclStorCluster(True)
 +          exp_err = [constants.CV_EINSTANCEUNSUITABLENODE]
 +          qa_cluster.AssertClusterVerify(fail=True, errors=exp_err)
 +          qa_instance.TestInstanceRemove(instance)
 +        finally:
 +          instance.Release()
        finally:
 -        qa_config.ReleaseNode(snode)
 +        snode.Release()
      qa_cluster.TestSetExclStorCluster(old_es)
    finally:
 -    qa_config.ReleaseNode(node)
 +    node.Release()
  
  
+ def _BuildSpecDict(par, mn, st, mx):
+   return {par: {"min": mn, "std": st, "max": mx}}
+ def TestIPolicyPlainInstance():
+   """Test instance policy interaction with instances"""
+   params = ["mem-size", "cpu-count", "disk-count", "disk-size", "nic-count"]
+   if not qa_config.IsTemplateSupported(constants.DT_PLAIN):
+     print "Template %s not supported" % constants.DT_PLAIN
+     return
+   # This test assumes that the group policy is empty
+   (_, old_specs) = qa_cluster.TestClusterSetISpecs({})
+   node = qa_config.AcquireNode()
+   try:
+     # Log of policy changes, list of tuples: (change, policy_violated)
+     history = []
+     instance = qa_instance.TestInstanceAddWithPlainDisk([node])
+     try:
+       policyerror = [constants.CV_EINSTANCEPOLICY]
+       for par in params:
+         qa_cluster.AssertClusterVerify()
 -        (iminval, imaxval) = qa_instance.GetInstanceSpec(instance["name"], par)
++        (iminval, imaxval) = qa_instance.GetInstanceSpec(instance.name, par)
+         # Some specs must be multiple of 4
+         new_spec = _BuildSpecDict(par, imaxval + 4, imaxval + 4, imaxval + 4)
+         history.append((new_spec, True))
+         qa_cluster.TestClusterSetISpecs(new_spec)
+         qa_cluster.AssertClusterVerify(warnings=policyerror)
+         if iminval > 0:
+           # Some specs must be multiple of 4
+           if iminval >= 4:
+             upper = iminval - 4
+           else:
+             upper = iminval - 1
+           new_spec = _BuildSpecDict(par, 0, upper, upper)
+           history.append((new_spec, True))
+           qa_cluster.TestClusterSetISpecs(new_spec)
+           qa_cluster.AssertClusterVerify(warnings=policyerror)
+         qa_cluster.TestClusterSetISpecs(old_specs)
+         history.append((old_specs, False))
+       qa_instance.TestInstanceRemove(instance)
+     finally:
 -      qa_config.ReleaseInstance(instance)
++      instance.Release()
+     # Now we replay the same policy changes, and we expect that the instance
+     # cannot be created for the cases where we had a policy violation above
+     for (change, failed) in history:
+       qa_cluster.TestClusterSetISpecs(change)
+       if failed:
+         qa_instance.TestInstanceAddWithPlainDisk([node], fail=True)
+       # Instance creation with no policy violation has been tested already
+   finally:
 -    qa_config.ReleaseNode(node)
++    node.Release()
  def RunInstanceTests():
    """Create and exercise instances."""
    instance_tests = [
@@@ -647,13 -664,14 +704,15 @@@ def RunQa()
        qa_cluster.AssertClusterVerify()
  
    finally:
 -    qa_config.ReleaseNode(pnode)
 +    pnode.Release()
  
    RunExclusiveStorageTests()
+   RunTestIf(["cluster-instance-policy", "instance-add-plain-disk"],
+             TestIPolicyPlainInstance)
  
    # Test removing instance with offline drbd secondary
 -  if qa_config.TestEnabled("instance-remove-drbd-offline"):
 +  if qa_config.TestEnabled(["instance-remove-drbd-offline",
 +                            "instance-add-drbd-disk"]):
      # Make sure the master is not put offline
      snode = qa_config.AcquireNode(exclude=qa_config.GetMasterNode())
      try:
Simple merge
@@@ -122,15 -137,14 +137,14 @@@ def AssertClusterVerify(fail=False, err
    """
    cvcmd = "gnt-cluster verify"
    mnode = qa_config.GetMasterNode()
-   if errors:
+   if errors or warnings:
 -    cvout = GetCommandOutput(mnode["primary"], cvcmd + " --error-codes",
 +    cvout = GetCommandOutput(mnode.primary, cvcmd + " --error-codes",
-                              fail=True)
-     actual = _GetCVErrorCodes(cvout)
-     expected = compat.UniqueFrozenset(e for (_, e, _) in errors)
-     if not actual.issuperset(expected):
-       missing = expected.difference(actual)
-       raise qa_error.Error("Cluster-verify didn't return these expected"
-                            " errors: %s" % utils.CommaJoin(missing))
+                              fail=(fail or errors))
+     (act_errs, act_warns) = _GetCVErrorCodes(cvout)
+     if errors:
+       _CheckVerifyErrors(act_errs, errors, "error")
+     if warnings:
+       _CheckVerifyErrors(act_warns, warnings, "warning")
    else:
      AssertCommand(cvcmd, fail=fail, node=mnode)
  
@@@ -424,6 -437,199 +438,199 @@@ def TestClusterModifyBe()
      AssertCommand(["gnt-cluster", "modify", "-B", bep])
  
  
+ _START_IPOLICY_RE = re.compile(r"^(\s*)Instance policy")
+ _START_ISPEC_RE = re.compile(r"^\s+-\s+(std|min|max)")
+ _VALUE_RE = r"([^\s:][^:]*):\s+(\S.*)$"
+ _IPOLICY_PARAM_RE = re.compile(r"^\s+-\s+" + _VALUE_RE)
+ _ISPEC_VALUE_RE = re.compile(r"^\s+" + _VALUE_RE)
+ def _GetClusterIPolicy():
+   """Return the run-time values of the cluster-level instance policy.
+   @rtype: tuple
+   @return: (policy, specs), where:
+       - policy is a dictionary of the policy values, instance specs excluded
+       - specs is dict of dict, specs[par][key] is a spec value, where key is
+         "min", "max", or "std"
+   """
+   mnode = qa_config.GetMasterNode()
 -  info = GetCommandOutput(mnode["primary"], "gnt-cluster info")
++  info = GetCommandOutput(mnode.primary, "gnt-cluster info")
+   inside_policy = False
+   end_ispec_re = None
+   curr_spec = ""
+   specs = {}
+   policy = {}
+   for line in info.splitlines():
+     if inside_policy:
+       # The order of the matching is important, as some REs overlap
+       m = _START_ISPEC_RE.match(line)
+       if m:
+         curr_spec = m.group(1)
+         continue
+       m = _IPOLICY_PARAM_RE.match(line)
+       if m:
+         policy[m.group(1)] = m.group(2).strip()
+         continue
+       m = _ISPEC_VALUE_RE.match(line)
+       if m:
+         assert curr_spec
+         par = m.group(1)
+         if par == "memory-size":
+           par = "mem-size"
+         d = specs.setdefault(par, {})
+         d[curr_spec] = m.group(2).strip()
+         continue
+       assert end_ispec_re is not None
+       if end_ispec_re.match(line):
+         inside_policy = False
+     else:
+       m = _START_IPOLICY_RE.match(line)
+       if m:
+         inside_policy = True
+         # We stop parsing when we find the same indentation level
+         re_str = r"^\s{%s}\S" % len(m.group(1))
+         end_ispec_re = re.compile(re_str)
+   # Sanity checks
+   assert len(specs) > 0
+   good = ("min" in d and "std" in d and "max" in d for d in specs)
+   assert good, "Missing item in specs: %s" % specs
+   assert len(policy) > 0
+   return (policy, specs)
+ def TestClusterModifyIPolicy():
+   """gnt-cluster modify --ipolicy-*"""
+   basecmd = ["gnt-cluster", "modify"]
+   (old_policy, old_specs) = _GetClusterIPolicy()
+   for par in ["vcpu-ratio", "spindle-ratio"]:
+     curr_val = float(old_policy[par])
+     test_values = [
+       (True, 1.0),
+       (True, 1.5),
+       (True, 2),
+       (False, "a"),
+       # Restore the old value
+       (True, curr_val),
+       ]
+     for (good, val) in test_values:
+       cmd = basecmd + ["--ipolicy-%s=%s" % (par, val)]
+       AssertCommand(cmd, fail=not good)
+       if good:
+         curr_val = val
+       # Check the affected parameter
+       (eff_policy, eff_specs) = _GetClusterIPolicy()
+       AssertEqual(float(eff_policy[par]), curr_val)
+       # Check everything else
+       AssertEqual(eff_specs, old_specs)
+       for p in eff_policy.keys():
+         if p == par:
+           continue
+         AssertEqual(eff_policy[p], old_policy[p])
+   # Disk templates are treated slightly differently
+   par = "disk-templates"
+   disp_str = "enabled disk templates"
+   curr_val = old_policy[disp_str]
+   test_values = [
+     (True, constants.DT_PLAIN),
+     (True, "%s,%s" % (constants.DT_PLAIN, constants.DT_DRBD8)),
+     (False, "thisisnotadisktemplate"),
+     (False, ""),
+     # Restore the old value
+     (True, curr_val.replace(" ", "")),
+     ]
+   for (good, val) in test_values:
+     cmd = basecmd + ["--ipolicy-%s=%s" % (par, val)]
+     AssertCommand(cmd, fail=not good)
+     if good:
+       curr_val = val
+     # Check the affected parameter
+     (eff_policy, eff_specs) = _GetClusterIPolicy()
+     AssertEqual(eff_policy[disp_str].replace(" ", ""), curr_val)
+     # Check everything else
+     AssertEqual(eff_specs, old_specs)
+     for p in eff_policy.keys():
+       if p == disp_str:
+         continue
+       AssertEqual(eff_policy[p], old_policy[p])
+ def TestClusterSetISpecs(new_specs, fail=False, old_values=None):
+   """Change instance specs.
+   @type new_specs: dict of dict
+   @param new_specs: new_specs[par][key], where key is "min", "max", "std". It
+       can be an empty dictionary.
+   @type fail: bool
+   @param fail: if the change is expected to fail
+   @type old_values: tuple
+   @param old_values: (old_policy, old_specs), as returned by
+      L{_GetClusterIPolicy}
+   @return: same as L{_GetClusterIPolicy}
+   """
+   if old_values:
+     (old_policy, old_specs) = old_values
+   else:
+     (old_policy, old_specs) = _GetClusterIPolicy()
+   if new_specs:
+     cmd = ["gnt-cluster", "modify"]
+     for (par, keyvals) in new_specs.items():
+       if par == "spindle-use":
+         # ignore spindle-use, which is not settable
+         continue
+       cmd += [
+         "--specs-%s" % par,
+         ",".join(["%s=%s" % (k, v) for (k, v) in keyvals.items()]),
+         ]
+     AssertCommand(cmd, fail=fail)
+   # Check the new state
+   (eff_policy, eff_specs) = _GetClusterIPolicy()
+   AssertEqual(eff_policy, old_policy)
+   if fail:
+     AssertEqual(eff_specs, old_specs)
+   else:
+     for par in eff_specs:
+       for key in eff_specs[par]:
+         if par in new_specs and key in new_specs[par]:
+           AssertEqual(int(eff_specs[par][key]), int(new_specs[par][key]))
+         else:
+           AssertEqual(int(eff_specs[par][key]), int(old_specs[par][key]))
+   return (eff_policy, eff_specs)
+ def TestClusterModifyISpecs():
+   """gnt-cluster modify --specs-*"""
+   params = ["mem-size", "disk-size", "disk-count", "cpu-count", "nic-count"]
+   (cur_policy, cur_specs) = _GetClusterIPolicy()
+   for par in params:
+     test_values = [
+       (True, 0, 4, 12),
+       (True, 4, 4, 12),
+       (True, 4, 12, 12),
+       (True, 4, 4, 4),
+       (False, 4, 0, 12),
+       (False, 4, 16, 12),
+       (False, 4, 4, 0),
+       (False, 12, 4, 4),
+       (False, 12, 4, 0),
+       (False, "a", 4, 12),
+       (False, 0, "a", 12),
+       (False, 0, 4, "a"),
+       # This is to restore the old values
+       (True,
+        cur_specs[par]["min"], cur_specs[par]["std"], cur_specs[par]["max"])
+       ]
+     for (good, mn, st, mx) in test_values:
+       new_vals = {par: {"min": str(mn), "std": str(st), "max": str(mx)}}
+       cur_state = (cur_policy, cur_specs)
+       # We update cur_specs, as we've copied the values to restore already
+       (cur_policy, cur_specs) = TestClusterSetISpecs(new_vals, fail=not good,
+                                                      old_values=cur_state)
  def TestClusterInfo():
    """gnt-cluster info"""
    AssertCommand(["gnt-cluster", "info"])
diff --cc qa/qa_group.py
Simple merge
@@@ -73,19 -70,25 +73,25 @@@ def _DiskTest(node, disk_template, fail
              "--os-type=%s" % qa_config.get("os"),
              "--disk-template=%s" % disk_template,
              "--node=%s" % node] +
 -           _GetGenericAddParameters(instance))
 -    cmd.append(instance["name"])
 +           _GetGenericAddParameters(instance, disk_template))
 +    cmd.append(instance.name)
  
-     AssertCommand(cmd)
+     AssertCommand(cmd, fail=fail)
  
-     _CheckSsconfInstanceList(instance.name)
-     instance.SetDiskTemplate(disk_template)
+     if not fail:
 -      _CheckSsconfInstanceList(instance["name"])
 -      qa_config.SetInstanceTemplate(instance, disk_template)
++      _CheckSsconfInstanceList(instance.name)
++      instance.SetDiskTemplate(disk_template)
  
-     return instance
+       return instance
    except:
 -    qa_config.ReleaseInstance(instance)
 +    instance.Release()
      raise
  
+   # Handle the case where creation is expected to fail
+   assert fail
 -  qa_config.ReleaseInstance(instance)
++  instance.Release()
+   return None
  
  def _GetInstanceInfo(instance):
    """Return information about the actual state of an instance.
@@@ -173,8 -171,21 +180,21 @@@ def _GetInstanceField(instance, field)
    """
    master = qa_config.GetMasterNode()
    infocmd = utils.ShellQuoteArgs(["gnt-instance", "list", "--no-headers",
-                                   "-o", field, instance])
-   info_out = qa_utils.GetCommandOutput(master.primary, infocmd).strip()
+                                   "--units", "m", "-o", field, instance])
 -  return qa_utils.GetCommandOutput(master["primary"], infocmd).strip()
++  return qa_utils.GetCommandOutput(master.primary, infocmd).strip()
+ def _GetBoolInstanceField(instance, field):
+   """Get the Boolean value of a field of an instance.
+   @type instance: string
+   @param instance: Instance name
+   @type field: string
+   @param field: Name of the field
+   @rtype: bool
+   """
+   info_out = _GetInstanceField(instance, field)
    if info_out == "Y":
      return True
    elif info_out == "N":
                           " %s" % (field, instance, info_out))
  
  
+ def _GetNumInstanceField(instance, field):
+   """Get a numeric value of a field of an instance.
+   @type instance: string
+   @param instance: Instance name
+   @type field: string
+   @param field: Name of the field
+   @rtype: int or float
+   """
+   info_out = _GetInstanceField(instance, field)
+   try:
+     ret = int(info_out)
+   except ValueError:
+     try:
+       ret = float(info_out)
+     except ValueError:
+       raise qa_error.Error("Field %s of instance %s has a non-numeric value:"
+                            " %s" % (field, instance, info_out))
+   return ret
+ def GetInstanceSpec(instance, spec):
+   """Return the current spec for the given parameter.
+   @type instance: string
+   @param instance: Instance name
+   @type spec: string
+   @param spec: one of the supported parameters: "mem-size", "cpu-count",
+       "disk-count", "disk-size", "nic-count"
+   @rtype: tuple
+   @return: (minspec, maxspec); minspec and maxspec can be different only for
+       memory and disk size
+   """
+   specmap = {
+     "mem-size": ["be/minmem", "be/maxmem"],
+     "cpu-count": ["vcpus"],
+     "disk-count": ["disk.count"],
+     "disk-size": ["disk.size/ "],
+     "nic-count": ["nic.count"],
+     }
+   # For disks, first we need the number of disks
+   if spec == "disk-size":
+     (numdisk, _) = GetInstanceSpec(instance, "disk-count")
+     fields = ["disk.size/%s" % k for k in range(0, numdisk)]
+   else:
+     assert spec in specmap, "%s not in %s" % (spec, specmap)
+     fields = specmap[spec]
+   values = [_GetNumInstanceField(instance, f) for f in fields]
+   return (min(values), max(values))
  def IsFailoverSupported(instance):
 -  templ = qa_config.GetInstanceTemplate(instance)
 -  return templ in constants.DTS_MIRRORED
 +  return instance.disk_template in constants.DTS_MIRRORED
  
  
  def IsMigrationSupported(instance):
  
  
  def IsDiskReplacingSupported(instance):
 -  templ = qa_config.GetInstanceTemplate(instance)
 -  return templ == constants.DT_DRBD8
 +  return instance.disk_template == constants.DT_DRBD8
  
  
- @InstanceCheck(None, INST_UP, RETURN_VALUE)
- def TestInstanceAddWithPlainDisk(nodes):
+ def TestInstanceAddWithPlainDisk(nodes, fail=False):
    """gnt-instance add -t plain"""
    assert len(nodes) == 1
-   return _DiskTest(nodes[0].primary, constants.DT_PLAIN)
 -  instance = _DiskTest(nodes[0]["primary"], constants.DT_PLAIN, fail=fail)
++  instance = _DiskTest(nodes[0].primary, constants.DT_PLAIN, fail=fail)
+   if not fail:
+     qa_utils.RunInstanceCheck(instance, True)
+   return instance
  
  
  @InstanceCheck(None, INST_UP, RETURN_VALUE)
@@@ -215,38 -215,14 +215,45 @@@ class TestClusterObject(unittest.TestCa
      self.fake_cl.enabled_hypervisors = sorted(constants.HYPER_TYPES)
      self.assertEqual(self.fake_cl.primary_hypervisor, constants.HT_CHROOT)
  
+   def testUpgradeConfig(self):
+     # FIXME: This test is incomplete
+     cluster = objects.Cluster()
+     cluster.UpgradeConfig()
+     cluster = objects.Cluster(ipolicy={"unknown_key": None})
+     self.assertRaises(errors.ConfigurationError, cluster.UpgradeConfig)
  
 +class TestClusterObjectTcpUdpPortPool(unittest.TestCase):
 +  def testNewCluster(self):
 +    self.assertTrue(objects.Cluster().tcpudp_port_pool is None)
 +
 +  def testSerializingEmpty(self):
 +    self.assertEqual(objects.Cluster().ToDict(), {
 +      "tcpudp_port_pool": [],
 +      })
 +
 +  def testSerializing(self):
 +    cluster = objects.Cluster.FromDict({})
 +    self.assertEqual(cluster.tcpudp_port_pool, set())
 +
 +    cluster.tcpudp_port_pool.add(3546)
 +    cluster.tcpudp_port_pool.add(62511)
 +
 +    data = cluster.ToDict()
 +    self.assertEqual(data.keys(), ["tcpudp_port_pool"])
 +    self.assertEqual(sorted(data["tcpudp_port_pool"]), sorted([3546, 62511]))
 +
 +  def testDeserializingEmpty(self):
 +    cluster = objects.Cluster.FromDict({})
 +    self.assertEqual(cluster.tcpudp_port_pool, set())
 +
 +  def testDeserialize(self):
 +    cluster = objects.Cluster.FromDict({
 +      "tcpudp_port_pool": [26214, 10039, 267],
 +      })
 +    self.assertEqual(cluster.tcpudp_port_pool, set([26214, 10039, 267]))
 +
 +
  class TestOS(unittest.TestCase):
    ALL_DATA = [
      "debootstrap",