Merge branch 'stable-2.11' into stable-2.12
author Klaus Aehlig <aehlig@google.com>
Tue, 3 Jun 2014 14:05:02 +0000 (16:05 +0200)
committer Klaus Aehlig <aehlig@google.com>
Tue, 3 Jun 2014 15:04:48 +0000 (17:04 +0200)
* stable-2.11
  Assume that instance keys might not be present in watcher
  Modify 'Hypervisor.ListInstances' to exclude user downs
  Extend QA for instance user down
  Allow instance start for user down instances
  Modify watcher to properly cleanup user down instances
  Modify instance shutdown to optionally mark as user down
  Fix instance queries to correctly report USER_down
  Modify config to update 'admin_state_source'
  Add field 'admin_state_source' to unit tests
  Add field 'admin_state_source' to the Instance class
  Add type 'AdminStateSource', tracks changes to 'admin_state'
  luxid: report error-down when user shutdown not allowed
  Correctly report user-down instance status
  Use standard 'Raise' method for LU exceptions
  Remove unnecessary list copying
  Use keyword args when passing 'hvparams' to 'ListInstances'
  Fix whitespace
  Fix typo in docstring

* stable-2.10
  Revision bump for 2.10.5
  Add NEWS file for 2.10.5
  Check drbd helper only in VM capable nodes
  'Raise' called inside 'CheckPrereq' needs the prereq kw

Conflicts:
lib/backend.py: union of changes
lib/cmdlib/instance.py: apply admin_source to stable-2.12 version
lib/cmdlib/instance_operation.py: union of additions
lib/config.py: tedious, but elementary, union; additionally, follow
    the semantic change of locking to _ConfigSync() (see the sketch
    after this list)
lib/ht.py: trivial
qa/ganeti-qa.py: take stable-2.12
src/Ganeti/Luxi.hs: take stable-2.12
src/Ganeti/Objects.hs: trivial
test/data/instance-prim-sec.txt: delete
test/hs/Test/Ganeti/Query/Instance.hs: trivial
tools/cfgupgrade: take all the upgrade parts, but only
    use the downgrade parts from stable-2.12
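
For reference, a minimal, self-contained sketch of the _ConfigSync() pattern
that the lib/config.py resolution follows (see the hunks below): the
per-method locking.ssynchronized(_config_lock) decorator plus explicit
_WriteConfig() calls are replaced by a single decorator that takes the lock,
runs the method, and syncs the configuration on exit. FakeConfigWriter and
the plain threading.Lock are illustrative stand-ins only, not the actual
Ganeti implementation.

    import threading


    class _ConfigSync(object):
      """Decorator factory: serialize a method and write the config after it."""

      def __call__(self, fn):
        def wrapper(writer, *args, **kwargs):
          with writer._lock:                # take the (simplified) config lock
            result = fn(writer, *args, **kwargs)
            writer._WriteConfig()           # persist once, after the mutation
          return result
        return wrapper


    class FakeConfigWriter(object):
      """Stand-in for ConfigWriter, reduced to what the sketch needs."""

      def __init__(self):
        self._lock = threading.Lock()
        self._data = {"instances": {}}

      def _ConfigData(self):
        return self._data

      def _WriteConfig(self):
        pass  # a real writer would serialize self._data to disk here

      @_ConfigSync()
      def MarkInstanceUp(self, inst_uuid):
        self._ConfigData()["instances"][inst_uuid] = "up"
        return inst_uuid


    writer = FakeConfigWriter()
    assert writer.MarkInstanceUp("inst1-uuid") == "inst1-uuid"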

Signed-off-by: Klaus Aehlig <aehlig@google.com>
Reviewed-by: Petr Pudlak <pudlak@google.com>

30 files changed:
NEWS
lib/backend.py
lib/cmdlib/cluster.py
lib/cmdlib/common.py
lib/cmdlib/instance.py
lib/cmdlib/instance_migration.py
lib/cmdlib/instance_operation.py
lib/cmdlib/instance_query.py
lib/cmdlib/node.py
lib/config.py
lib/ht.py
lib/hypervisor/hv_kvm/__init__.py
lib/hypervisor/hv_xen.py
lib/objects.py
lib/query.py
lib/watcher/__init__.py
qa/qa_instance.py
src/Ganeti/Constants.hs
src/Ganeti/Objects.hs
src/Ganeti/OpCodes.hs
src/Ganeti/OpParams.hs
src/Ganeti/Query/Instance.hs
src/Ganeti/Types.hs
test/hs/Test/Ganeti/Objects.hs
test/hs/Test/Ganeti/OpCodes.hs
test/hs/Test/Ganeti/Query/Instance.hs
test/py/cmdlib/node_unittest.py
test/py/cmdlib/testsupport/config_mock.py
test/py/ganeti.query_unittest.py
tools/cfgupgrade

diff --cc NEWS
Simple merge
diff --cc lib/backend.py
@@@ -1516,10 -1504,10 +1513,10 @@@ def GetInstanceMigratable(instance)
    """
    hyper = hypervisor.GetHypervisor(instance.hypervisor)
    iname = instance.name
-   if iname not in hyper.ListInstances(instance.hvparams):
+   if iname not in hyper.ListInstances(hvparams=instance.hvparams):
      _Fail("Instance %s is not running", iname)
  
 -  for idx in range(len(instance.disks)):
 +  for idx in range(len(instance.disks_info)):
      link_name = _GetBlockDevSymlinkPath(iname, idx)
      if not os.path.islink(link_name):
        logging.warning("Instance %s is missing symlink %s for disk %d",
@@@ -1929,7 -1926,7 +1935,7 @@@ def InstanceShutdown(instance, timeout
    except errors.HypervisorError, err:
      logging.warning("Failed to execute post-shutdown cleanup step: %s", err)
  
-   _RemoveBlockDevLinks(iname, instance.disks_info)
 -  _RemoveBlockDevLinks(instance.name, instance.disks)
++  _RemoveBlockDevLinks(instance.name, instance.disks_info)
  
  
  def InstanceReboot(instance, reboot_type, shutdown_timeout, reason):
Simple merge
Simple merge
@@@ -1334,44 -1288,113 +1336,43 @@@ class LUInstanceCreate(LogicalUnit)
  
      self.dry_run_result = list(node_uuids)
  
 -  def Exec(self, feedback_fn):
 -    """Create and add the instance to the cluster.
 -
 -    """
 -    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
 -                self.owned_locks(locking.LEVEL_NODE)), \
 -      "Node locks differ from node resource locks"
 -    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)
 -
 -    ht_kind = self.op.hypervisor
 -    if ht_kind in constants.HTS_REQ_PORT:
 -      network_port = self.cfg.AllocatePort()
 -    else:
 -      network_port = None
 -
 -    instance_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
 -
 -    # This is ugly but we got a chicken-egg problem here
 -    # We can only take the group disk parameters, as the instance
 -    # has no disks yet (we are generating them right here).
 -    nodegroup = self.cfg.GetNodeGroup(self.pnode.group)
 -    disks = GenerateDiskTemplate(self,
 -                                 self.op.disk_template,
 -                                 instance_uuid, self.pnode.uuid,
 -                                 self.secondaries,
 -                                 self.disks,
 -                                 self.instance_file_storage_dir,
 -                                 self.op.file_driver,
 -                                 0,
 -                                 feedback_fn,
 -                                 self.cfg.GetGroupDiskParams(nodegroup))
 -
 -    iobj = objects.Instance(name=self.op.instance_name,
 -                            uuid=instance_uuid,
 -                            os=self.op.os_type,
 -                            primary_node=self.pnode.uuid,
 -                            nics=self.nics, disks=disks,
 -                            disk_template=self.op.disk_template,
 -                            disks_active=False,
 -                            admin_state=constants.ADMINST_DOWN,
 -                            admin_state_source=constants.ADMIN_SOURCE,
 -                            network_port=network_port,
 -                            beparams=self.op.beparams,
 -                            hvparams=self.op.hvparams,
 -                            hypervisor=self.op.hypervisor,
 -                            osparams=self.op.osparams,
 -                            )
 -
 -    if self.op.tags:
 -      for tag in self.op.tags:
 -        iobj.AddTag(tag)
 +  def _RemoveDegradedDisks(self, feedback_fn, disk_abort, instance):
 +    """Removes degraded disks and instance.
  
 -    if self.adopt_disks:
 -      if self.op.disk_template == constants.DT_PLAIN:
 -        # rename LVs to the newly-generated names; we need to construct
 -        # 'fake' LV disks with the old data, plus the new unique_id
 -        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
 -        rename_to = []
 -        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
 -          rename_to.append(t_dsk.logical_id)
 -          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
 -        result = self.rpc.call_blockdev_rename(self.pnode.uuid,
 -                                               zip(tmp_disks, rename_to))
 -        result.Raise("Failed to rename adoped LVs")
 -    else:
 -      feedback_fn("* creating instance disks...")
 -      try:
 -        CreateDisks(self, iobj)
 -      except errors.OpExecError:
 -        self.LogWarning("Device creation failed")
 -        self.cfg.ReleaseDRBDMinors(instance_uuid)
 -        raise
 +    It optionally checks whether disks are degraded.  If the disks are
 +    degraded, they are removed and the instance is also removed from
 +    the configuration.
  
 -    feedback_fn("adding instance %s to cluster config" % self.op.instance_name)
 +    If L{disk_abort} is True, then the disks are considered degraded
 +    and removed, and the instance is removed from the configuration.
  
 -    self.cfg.AddInstance(iobj, self.proc.GetECId())
 +    If L{disk_abort} is False, then it first checks whether disks are
 +    degraded and, if so, it removes the disks and the instance is
 +    removed from the configuration.
  
 -    # Declare that we don't want to remove the instance lock anymore, as we've
 -    # added the instance to the config
 -    del self.remove_locks[locking.LEVEL_INSTANCE]
 +    @type feedback_fn: callable
 +    @param feedback_fn: function used send feedback back to the caller
  
 -    if self.op.mode == constants.INSTANCE_IMPORT:
 -      # Release unused nodes
 -      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node_uuid])
 -    else:
 -      # Release all nodes
 -      ReleaseLocks(self, locking.LEVEL_NODE)
 +    @type disk_abort: boolean
 +    @param disk_abort:
 +      True if disks are degraded, False to first check if disks are
 +      degraded
 +    @type instance: L{objects.Instance}
 +    @param instance: instance containing the disks to check
  
 -    disk_abort = False
 -    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
 -      feedback_fn("* wiping instance disks...")
 -      try:
 -        WipeDisks(self, iobj)
 -      except errors.OpExecError, err:
 -        logging.exception("Wiping disks failed")
 -        self.LogWarning("Wiping instance disks failed (%s)", err)
 -        disk_abort = True
 +    @rtype: NoneType
 +    @return: None
 +    @raise errors.OpPrereqError: if disks are degraded
  
 +    """
      if disk_abort:
 -      # Something is already wrong with the disks, don't do anything else
        pass
      elif self.op.wait_for_sync:
 -      disk_abort = not WaitForSync(self, iobj)
 -    elif iobj.disk_template in constants.DTS_INT_MIRROR:
 +      disk_abort = not WaitForSync(self, instance)
 +    elif instance.disk_template in constants.DTS_INT_MIRROR:
        # make sure the disks are not degraded (still sync-ing is ok)
        feedback_fn("* checking mirrors status")
 -      disk_abort = not WaitForSync(self, iobj, oneshot=True)
 +      disk_abort = not WaitForSync(self, instance, oneshot=True)
      else:
        disk_abort = False
  
            raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                         % self.op.mode)
  
 -        # Run rename script on newly imported instance
          assert iobj.name == self.op.instance_name
 -        feedback_fn("Running rename script for %s" % self.op.instance_name)
 -        result = self.rpc.call_instance_run_rename(self.pnode.uuid, iobj,
 -                                                   rename_from,
 -                                                   self.op.debug_level)
 -        result.Warn("Failed to run rename script for %s on node %s" %
 -                    (self.op.instance_name, self.pnode.name), self.LogWarning)
 +
 +        # Run rename script on newly imported instance
 +        if iobj.os:
 +          feedback_fn("Running rename script for %s" % self.op.instance_name)
 +          result = self.rpc.call_instance_run_rename(self.pnode.uuid, iobj,
 +                                                     rename_from,
 +                                                     self.op.debug_level)
 +          result.Warn("Failed to run rename script for %s on node %s" %
 +                      (self.op.instance_name, self.pnode.name), self.LogWarning)
 +
 +  def GetOsInstallPackageEnvironment(self, instance, script):
 +    """Returns the OS scripts environment for the helper VM
 +
 +    @type instance: L{objects.Instance}
 +    @param instance: instance for which the OS scripts are run
 +
 +    @type script: string
 +    @param script: script to run (e.g.,
 +                   constants.OS_SCRIPT_CREATE_UNTRUSTED)
 +
 +    @rtype: dict of string to string
 +    @return: OS scripts environment for the helper VM
 +
 +    """
 +    env = {"OS_SCRIPT": script}
 +
 +    # We pass only the instance's disks, not the helper VM's disks.
 +    if instance.hypervisor == constants.HT_KVM:
 +      prefix = "/dev/vd"
 +    elif instance.hypervisor in [constants.HT_XEN_PVM, constants.HT_XEN_HVM]:
 +      prefix = "/dev/xvd"
 +    else:
 +      raise errors.OpExecError("Cannot run OS scripts in a virtualized"
 +                               " environment for hypervisor '%s'"
 +                               % instance.hypervisor)
 +
 +    num_disks = len(self.cfg.GetInstanceDisks(instance.uuid))
 +
 +    for idx, disk_label in enumerate(utils.GetDiskLabels(prefix, num_disks + 1,
 +                                                         start=1)):
 +      env["DISK_%d_PATH" % idx] = disk_label
 +
 +    return env
 +
 +  def UpdateInstanceOsInstallPackage(self, feedback_fn, instance, override_env):
 +    """Updates the OS parameter 'os-install-package' for an instance.
 +
 +    The OS install package is an archive containing an OS definition
 +    and a file containing the environment variables needed to run the
 +    OS scripts.
 +
 +    The OS install package is served by the metadata daemon to the
 +    instances, so the OS scripts can run inside the virtualized
 +    environment.
 +
 +    @type feedback_fn: callable
 +    @param feedback_fn: function used send feedback back to the caller
 +
 +    @type instance: L{objects.Instance}
 +    @param instance: instance for which the OS parameter
 +                     'os-install-package' is updated
 +
 +    @type override_env: dict of string to string
 +    @param override_env: if supplied, it overrides the environment of
 +                         the export OS scripts archive
 +
 +    """
 +    if "os-install-package" in instance.osparams:
 +      feedback_fn("Using OS install package '%s'" %
 +                  instance.osparams["os-install-package"])
 +    else:
 +      result = self.rpc.call_os_export(instance.primary_node, instance,
 +                                       override_env)
 +      result.Raise("Could not export OS '%s'" % instance.os)
 +      instance.osparams["os-install-package"] = result.payload
 +
 +      feedback_fn("Created OS install package '%s'" % result.payload)
 +
 +  def RunOsScriptsVirtualized(self, feedback_fn, instance):
 +    """Runs the OS scripts inside a safe virtualized environment.
 +
 +    The virtualized environment reuses the instance and temporarily
 +    creates a disk onto which the image of the helper VM is dumped.
 +    The temporary disk is used to boot the helper VM.  The OS scripts
 +    are passed to the helper VM through the metadata daemon and the OS
 +    install package.
 +
 +    @type feedback_fn: callable
 +    @param feedback_fn: function used send feedback back to the caller
 +
 +    @type instance: L{objects.Instance}
 +    @param instance: instance for which the OS scripts must be run
 +                     inside the virtualized environment
 +
 +    """
 +    install_image = self.cfg.GetInstallImage()
 +
 +    if not install_image:
 +      raise errors.OpExecError("Cannot create install instance because an"
 +                               " install image has not been specified")
 +
 +    disk_size = DetermineImageSize(self, install_image, instance.primary_node)
 +
 +    env = self.GetOsInstallPackageEnvironment(
 +      instance,
 +      constants.OS_SCRIPT_CREATE_UNTRUSTED)
 +    self.UpdateInstanceOsInstallPackage(feedback_fn, instance, env)
 +    UpdateMetadata(feedback_fn, self.rpc, instance,
 +                   osparams_private=self.op.osparams_private,
 +                   osparams_secret=self.op.osparams_secret)
 +
 +    with TemporaryDisk(self,
 +                       instance,
 +                       [(constants.DT_PLAIN, constants.DISK_RDWR, disk_size)],
 +                       feedback_fn):
 +      feedback_fn("Activating instance disks")
 +      StartInstanceDisks(self, instance, False)
 +
 +      feedback_fn("Imaging disk with install image")
 +      ImageDisks(self, instance, install_image)
 +
 +      feedback_fn("Starting instance with install image")
 +      result = self.rpc.call_instance_start(instance.primary_node,
 +                                            (instance, [], []),
 +                                            False, self.op.reason)
 +      result.Raise("Could not start instance '%s' with the install image '%s'"
 +                   % (instance.name, install_image))
 +
 +      # First wait for the instance to start up
 +      running_check = lambda: IsInstanceRunning(self, instance,
 +                                                check_user_shutdown=True)
 +      instance_up = retry.SimpleRetry(True, running_check, 5.0,
 +                                      self.op.helper_startup_timeout)
 +      if not instance_up:
 +        raise errors.OpExecError("Could not boot instance using install image"
 +                                 " '%s'" % install_image)
 +
 +      feedback_fn("Instance is up, now awaiting shutdown")
 +
 +      # Then for it to be finished, detected by its shutdown
 +      instance_up = retry.SimpleRetry(False, running_check, 20.0,
 +                                      self.op.helper_shutdown_timeout)
 +      if instance_up:
 +        self.LogWarning("Installation not completed prior to timeout, shutting"
 +                        " down instance forcibly")
 +
 +    feedback_fn("Installation complete")
 +
 +  def Exec(self, feedback_fn):
 +    """Create and add the instance to the cluster.
 +
 +    """
 +    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
 +                self.owned_locks(locking.LEVEL_NODE)), \
 +      "Node locks differ from node resource locks"
 +
 +    ht_kind = self.op.hypervisor
 +    if ht_kind in constants.HTS_REQ_PORT:
 +      network_port = self.cfg.AllocatePort()
 +    else:
 +      network_port = None
 +
 +    instance_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
 +
 +    # This is ugly but we got a chicken-egg problem here
 +    # We can only take the group disk parameters, as the instance
 +    # has no disks yet (we are generating them right here).
 +    nodegroup = self.cfg.GetNodeGroup(self.pnode.group)
 +    disks = GenerateDiskTemplate(self,
 +                                 self.op.disk_template,
 +                                 instance_uuid, self.pnode.uuid,
 +                                 self.secondaries,
 +                                 self.disks,
 +                                 self.instance_file_storage_dir,
 +                                 self.op.file_driver,
 +                                 0,
 +                                 feedback_fn,
 +                                 self.cfg.GetGroupDiskParams(nodegroup))
 +
 +    if self.op.os_type is None:
 +      os_type = ""
 +    else:
 +      os_type = self.op.os_type
 +
 +    iobj = objects.Instance(name=self.op.instance_name,
 +                            uuid=instance_uuid,
 +                            os=os_type,
 +                            primary_node=self.pnode.uuid,
 +                            nics=self.nics, disks=[],
 +                            disk_template=self.op.disk_template,
 +                            disks_active=False,
 +                            admin_state=constants.ADMINST_DOWN,
++                            admin_state_source=constants.ADMIN_SOURCE,
 +                            network_port=network_port,
 +                            beparams=self.op.beparams,
 +                            hvparams=self.op.hvparams,
 +                            hypervisor=self.op.hypervisor,
 +                            osparams=self.op.osparams,
 +                            osparams_private=self.op.osparams_private,
 +                            )
 +
 +    if self.op.tags:
 +      for tag in self.op.tags:
 +        iobj.AddTag(tag)
 +
 +    if self.adopt_disks:
 +      if self.op.disk_template == constants.DT_PLAIN:
 +        # rename LVs to the newly-generated names; we need to construct
 +        # 'fake' LV disks with the old data, plus the new unique_id
 +        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
 +        rename_to = []
 +        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
 +          rename_to.append(t_dsk.logical_id)
 +          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
 +        result = self.rpc.call_blockdev_rename(self.pnode.uuid,
 +                                               zip(tmp_disks, rename_to))
 +        result.Raise("Failed to rename adoped LVs")
 +    else:
 +      feedback_fn("* creating instance disks...")
 +      try:
 +        CreateDisks(self, iobj, instance_disks=disks)
 +      except errors.OpExecError:
 +        self.LogWarning("Device creation failed")
 +        self.cfg.ReleaseDRBDMinors(instance_uuid)
 +        raise
 +
 +    feedback_fn("adding instance %s to cluster config" % self.op.instance_name)
 +    self.cfg.AddInstance(iobj, self.proc.GetECId())
 +
 +    feedback_fn("adding disks to cluster config")
 +    for disk in disks:
 +      self.cfg.AddInstanceDisk(iobj.uuid, disk)
 +
 +    # re-read the instance from the configuration
 +    iobj = self.cfg.GetInstanceInfo(iobj.uuid)
 +
 +    if self.op.mode == constants.INSTANCE_IMPORT:
 +      # Release unused nodes
 +      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node_uuid])
 +    else:
 +      # Release all nodes
 +      ReleaseLocks(self, locking.LEVEL_NODE)
 +
 +    # Wipe disks
 +    disk_abort = False
 +    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
 +      feedback_fn("* wiping instance disks...")
 +      try:
 +        WipeDisks(self, iobj)
 +      except errors.OpExecError, err:
 +        logging.exception("Wiping disks failed")
 +        self.LogWarning("Wiping instance disks failed (%s)", err)
 +        disk_abort = True
 +
 +    self._RemoveDegradedDisks(feedback_fn, disk_abort, iobj)
 +
 +    # Image disks
 +    os_image = objects.GetOSImage(iobj.osparams)
 +    disk_abort = False
 +
 +    if not self.adopt_disks and os_image is not None:
 +      feedback_fn("* imaging instance disks...")
 +      try:
 +        ImageDisks(self, iobj, os_image)
 +      except errors.OpExecError, err:
 +        logging.exception("Imaging disks failed")
 +        self.LogWarning("Imaging instance disks failed (%s)", err)
 +        disk_abort = True
 +
 +    self._RemoveDegradedDisks(feedback_fn, disk_abort, iobj)
 +
 +    # instance disks are now active
 +    iobj.disks_active = True
 +
 +    # Release all node resource locks
 +    ReleaseLocks(self, locking.LEVEL_NODE_RES)
 +
 +    if iobj.os:
 +      result = self.rpc.call_os_diagnose([iobj.primary_node])[iobj.primary_node]
 +      result.Raise("Failed to get OS '%s'" % iobj.os)
 +
 +      trusted = None
 +
 +      for (name, _, _, _, _, _, _, os_trusted) in result.payload:
 +        if name == objects.OS.GetName(iobj.os):
 +          trusted = os_trusted
 +          break
 +
 +      if trusted is None:
 +        raise errors.OpPrereqError("OS '%s' is not available in node '%s'" %
 +                                   (iobj.os, iobj.primary_node))
 +      elif trusted:
 +        self.RunOsScripts(feedback_fn, iobj)
 +      else:
 +        self.RunOsScriptsVirtualized(feedback_fn, iobj)
 +        # Instance is modified by 'RunOsScriptsVirtualized',
 +        # therefore, it must be retrieved once again from the
 +        # configuration, otherwise there will be a config object
 +        # version mismatch.
 +        iobj = self.cfg.GetInstanceInfo(iobj.uuid)
 +
 +    # Update instance metadata so that it can be reached from the
 +    # metadata service.
 +    UpdateMetadata(feedback_fn, self.rpc, iobj,
 +                   osparams_private=self.op.osparams_private,
 +                   osparams_secret=self.op.osparams_secret)
  
      assert not self.owned_locks(locking.LEVEL_NODE_RES)
  
Simple merge
@@@ -161,8 -171,16 +172,17 @@@ class LUInstanceStartup(LogicalUnit)
        assert self.op.ignore_offline_nodes
        self.LogInfo("Primary node offline, marked instance as started")
      else:
+       if self.requires_cleanup:
+         result = self.rpc.call_instance_shutdown(
+           self.instance.primary_node,
+           self.instance,
+           self.op.shutdown_timeout, self.op.reason)
+         result.Raise("Could not shutdown instance '%s'", self.instance.name)
+         ShutdownInstanceDisks(self, self.instance)
        StartInstanceDisks(self, self.instance, self.op.force)
 +      self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
  
        result = \
          self.rpc.call_instance_start(self.instance.primary_node,
@@@ -234,8 -278,11 +281,13 @@@ class LUInstanceShutdown(LogicalUnit)
      # If the instance is offline we shouldn't mark it as down, as that
      # resets the offline flag.
      if not self.op.no_remember and self.instance.admin_state in INSTANCE_ONLINE:
 +      self.instance = self.cfg.MarkInstanceDown(self.instance.uuid)
 +
+       if self.op.admin_state_source == constants.ADMIN_SOURCE:
+         self.cfg.MarkInstanceDown(self.instance.uuid)
+       elif self.op.admin_state_source == constants.USER_SOURCE:
+         self.cfg.MarkInstanceUserDown(self.instance.uuid)
      if self.primary_offline:
        assert self.op.ignore_offline_nodes
        self.LogInfo("Primary node offline, marked instance as stopped")
Simple merge
Simple merge
diff --cc lib/config.py
@@@ -1849,17 -1509,15 +1849,18 @@@ class ConfigWriter(object)
        raise errors.ConfigurationError("Cannot add '%s': UUID %s already"
                                        " in use" % (item.name, item.uuid))
  
-   def _SetInstanceStatus(self, inst_uuid, status, disks_active):
+   def _SetInstanceStatus(self, inst_uuid, status, disks_active,
+                          admin_state_source):
      """Set the instance's status to a given value.
  
 +    @rtype: L{objects.Instance}
 +    @return: the updated instance object
 +
      """
 -    if inst_uuid not in self._config_data.instances:
 +    if inst_uuid not in self._ConfigData().instances:
        raise errors.ConfigurationError("Unknown instance '%s'" %
                                        inst_uuid)
 -    instance = self._config_data.instances[inst_uuid]
 +    instance = self._ConfigData().instances[inst_uuid]
  
      if status is None:
        status = instance.admin_state
             "Invalid status '%s' passed to SetInstanceStatus" % (status,)
  
      if instance.admin_state != status or \
-        instance.disks_active != disks_active:
+        instance.disks_active != disks_active or \
+        instance.admin_state_source != admin_state_source:
        instance.admin_state = status
        instance.disks_active = disks_active
+       instance.admin_state_source = admin_state_source
        instance.serial_no += 1
        instance.mtime = time.time()
 -      self._WriteConfig()
 +    return instance
  
 -  @locking.ssynchronized(_config_lock)
 +  @_ConfigSync()
    def MarkInstanceUp(self, inst_uuid):
      """Mark the instance status to up in the config.
  
      This also sets the instance disks active flag.
  
 +    @rtype: L{objects.Instance}
 +    @return: the updated instance object
 +
      """
-     return self._SetInstanceStatus(inst_uuid, constants.ADMINST_UP, True)
 -    self._SetInstanceStatus(inst_uuid, constants.ADMINST_UP, True,
 -                            constants.ADMIN_SOURCE)
++    return self._SetInstanceStatus(inst_uuid, constants.ADMINST_UP, True,
++                                   constants.ADMIN_SOURCE)
  
 -  @locking.ssynchronized(_config_lock)
 +  @_ConfigSync()
    def MarkInstanceOffline(self, inst_uuid):
      """Mark the instance status to down in the config.
  
      This also clears the instance disks active flag.
  
 +    @rtype: L{objects.Instance}
 +    @return: the updated instance object
 +
      """
-     return self._SetInstanceStatus(inst_uuid, constants.ADMINST_OFFLINE, False)
 -    self._SetInstanceStatus(inst_uuid, constants.ADMINST_OFFLINE, False,
 -                            constants.ADMIN_SOURCE)
++    return self._SetInstanceStatus(inst_uuid, constants.ADMINST_OFFLINE, False,
++                                   constants.ADMIN_SOURCE)
  
 -  @locking.ssynchronized(_config_lock)
 +  @_ConfigSync()
    def RemoveInstance(self, inst_uuid):
      """Remove the instance from the configuration.
  
      This does not touch the instance disks active flag, as shut down instances
      can still have active disks.
  
 +    @rtype: L{objects.Instance}
 +    @return: the updated instance object
 +
      """
-     return self._SetInstanceStatus(inst_uuid, constants.ADMINST_DOWN, None)
 -    self._SetInstanceStatus(inst_uuid, constants.ADMINST_DOWN, None,
 -                            constants.ADMIN_SOURCE)
++    return self._SetInstanceStatus(inst_uuid, constants.ADMINST_DOWN, None,
++                                   constants.ADMIN_SOURCE)
 -  @locking.ssynchronized(_config_lock)
++  @_ConfigSync()
+   def MarkInstanceUserDown(self, inst_uuid):
+     """Mark the status of an instance to user down in the configuration.
+     This does not touch the instance disks active flag, as user shut
+     down instances can still have active disks.
+     """
+     self._SetInstanceStatus(inst_uuid, constants.ADMINST_DOWN, None,
+                             constants.USER_SOURCE)
  
 -  @locking.ssynchronized(_config_lock)
 +  @_ConfigSync()
    def MarkInstanceDisksActive(self, inst_uuid):
      """Mark the status of instance disks active.
  
 +    @rtype: L{objects.Instance}
 +    @return: the updated instance object
 +
      """
-     return self._SetInstanceStatus(inst_uuid, None, True)
 -    self._SetInstanceStatus(inst_uuid, None, True, None)
++    return self._SetInstanceStatus(inst_uuid, None, True, None)
  
 -  @locking.ssynchronized(_config_lock)
 +  @_ConfigSync()
    def MarkInstanceDisksInactive(self, inst_uuid):
      """Mark the status of instance disks inactive.
  
 +    @rtype: L{objects.Instance}
 +    @return: the updated instance object
 +
      """
-     return self._SetInstanceStatus(inst_uuid, None, False)
 -    self._SetInstanceStatus(inst_uuid, None, False, None)
++    return self._SetInstanceStatus(inst_uuid, None, False, None)
  
    def _UnlockedGetInstanceList(self):
      """Get the list of instances.
diff --cc lib/ht.py
Simple merge
Simple merge
Simple merge
diff --cc lib/objects.py
@@@ -1157,11 -1073,10 +1157,12 @@@ class Instance(TaggableObject)
      "hvparams",
      "beparams",
      "osparams",
 +    "osparams_private",
      "admin_state",
+     "admin_state_source",
      "nics",
      "disks",
 +    "disks_info",
      "disk_template",
      "disks_active",
      "network_port",
      """Fill defaults for missing configuration values.
  
      """
+     if self.admin_state_source is None:
+       self.admin_state_source = constants.ADMIN_SOURCE
      for nic in self.nics:
        nic.UpgradeConfig()
 -    for disk in self.disks:
 -      disk.UpgradeConfig()
 +    if self.disks is None:
 +      self.disks = []
      if self.hvparams:
        for key in constants.HVC_GLOBALS:
          try:
diff --cc lib/query.py
Simple merge
Simple merge
@@@ -1208,10 -1236,10 +1287,10 @@@ def _TestInstanceUserDownXen(instance)
  
  
  @InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
- def _TestInstanceUserDownKvm(instance, master):
+ def _TestInstanceUserDownKvm(instance):
    def _StopKVMInstance():
      AssertCommand("pkill -f \"\\-name %s\"" % instance.name, node=primary)
 -    time.sleep(5)
 +    time.sleep(10)
  
    AssertCommand(["gnt-instance", "modify", "-H", "user_shutdown=true",
                   instance.name])
Simple merge
@@@ -461,19 -439,19 +461,20 @@@ $(buildParam "Be" "bep
    ])
  
  $(buildObject "Instance" "inst" $
 -  [ simpleField "name"               [t| String             |]
 -  , simpleField "primary_node"       [t| String             |]
 -  , simpleField "os"                 [t| String             |]
 -  , simpleField "hypervisor"         [t| Hypervisor         |]
 -  , simpleField "hvparams"           [t| HvParams           |]
 -  , simpleField "beparams"           [t| PartialBeParams    |]
 -  , simpleField "osparams"           [t| OsParams           |]
 -  , simpleField "admin_state"        [t| AdminState         |]
 +  [ simpleField "name"             [t| String             |]
 +  , simpleField "primary_node"     [t| String             |]
 +  , simpleField "os"               [t| String             |]
 +  , simpleField "hypervisor"       [t| Hypervisor         |]
 +  , simpleField "hvparams"         [t| HvParams           |]
 +  , simpleField "beparams"         [t| PartialBeParams    |]
 +  , simpleField "osparams"         [t| OsParams           |]
 +  , simpleField "osparams_private" [t| OsParamsPrivate    |]
 +  , simpleField "admin_state"      [t| AdminState         |]
+   , simpleField "admin_state_source" [t| AdminStateSource   |]
 -  , simpleField "nics"               [t| [PartialNic]       |]
 -  , simpleField "disks"              [t| [Disk]             |]
 -  , simpleField "disk_template"      [t| DiskTemplate       |]
 -  , simpleField "disks_active"       [t| Bool               |]
 +  , simpleField "nics"             [t| [PartialNic]       |]
 +  , simpleField "disks"            [t| [String]           |]
 +  , simpleField "disk_template"    [t| DiskTemplate       |]
 +  , simpleField "disks_active"     [t| Bool               |]
    , optionalField $ simpleField "network_port" [t| Int  |]
    ]
    ++ timeStampFields
Simple merge
Simple merge
Simple merge
Simple merge
@@@ -125,10 -125,10 +127,12 @@@ instance Arbitrary Instance wher
        <*> arbitrary
        -- osparams
        <*> pure (GenericContainer Map.empty)
 +      -- osparams_private
 +      <*> pure (GenericContainer Map.empty)
        -- admin_state
        <*> arbitrary
+       -- admin_state_source
+       <*> arbitrary
        -- nics
        <*> arbitrary
        -- disks
Simple merge
@@@ -52,9 -52,9 +53,9 @@@ createInstance name pnodeUuid adminStat
    Instance name pnodeUuid "" Kvm
      (GenericContainer Map.empty)
      (PartialBeParams Nothing Nothing Nothing Nothing Nothing Nothing)
 -    (GenericContainer Map.empty)
 -    adminState adminStateSource [] [] DTDrbd8 False Nothing 0.0 0.0 "" 0
 -    Set.empty
 +    (GenericContainer Map.empty) (GenericContainer Map.empty)
-     adminState [] [] DTDrbd8 False Nothing epochTime epochTime "" 0 Set.empty
++    adminState adminStateSource [] [] DTDrbd8 False Nothing epochTime epochTime "" 0 Set.empty
 +  where epochTime = TOD 0 0
  
  -- | A fake InstanceInfo to be used to check values.
  fakeInstanceInfo :: InstanceInfo
Simple merge
@@@ -192,8 -181,8 +192,9 @@@ class ConfigMock(config.ConfigWriter)
                       hvparams=None,
                       beparams=None,
                       osparams=None,
 +                     osparams_private=None,
                       admin_state=None,
+                      admin_state_source=None,
                       nics=None,
                       disks=None,
                       disk_template=None,
        beparams = {}
      if osparams is None:
        osparams = {}
 +    if osparams_private is None:
 +      osparams_private = {}
      if admin_state is None:
        admin_state = constants.ADMINST_DOWN
+     if admin_state_source is None:
+       admin_state_source = constants.ADMIN_SOURCE
      if nics is None:
        nics = [self.CreateNic()]
      if disk_template is None:
                              hvparams=hvparams,
                              beparams=beparams,
                              osparams=osparams,
 +                            osparams_private=osparams_private,
                              admin_state=admin_state,
+                             admin_state_source=admin_state_source,
                              nics=nics,
 -                            disks=disks,
 +                            disks=[],
                              disk_template=disk_template,
                              disks_active=disks_active,
                              network_port=network_port)
@@@ -694,10 -694,11 +694,12 @@@ class TestInstanceQuery(unittest.TestCa
        objects.Instance(name="inst1", hvparams={}, beparams={}, nics=[],
          uuid="inst1-uuid",
          ctime=1291244000, mtime=1291244400, serial_no=30,
-         admin_state=constants.ADMINST_UP, hypervisor=constants.HT_XEN_PVM,
+         admin_state=constants.ADMINST_UP,
+         admin_state_source=constants.ADMIN_SOURCE,
+         hypervisor=constants.HT_XEN_PVM,
          os="linux1",
          primary_node="node1-uuid",
 +        secondary_nodes=[],
          disk_template=constants.DT_PLAIN,
          disks=[],
          disks_active=True,
        objects.Instance(name="inst2", hvparams={}, nics=[],
          uuid="inst2-uuid",
          ctime=1291211000, mtime=1291211077, serial_no=1,
-         admin_state=constants.ADMINST_UP, hypervisor=constants.HT_XEN_HVM,
+         admin_state=constants.ADMINST_UP,
+         admin_state_source=constants.ADMIN_SOURCE,
+         hypervisor=constants.HT_XEN_HVM,
          os="deb99",
          primary_node="node5-uuid",
 +        secondary_nodes=[],
          disk_template=constants.DT_DISKLESS,
          disks=[],
          disks_active=True,
        objects.Instance(name="inst3", hvparams={}, beparams={},
          uuid="inst3-uuid",
          ctime=1291011000, mtime=1291013000, serial_no=1923,
-         admin_state=constants.ADMINST_DOWN, hypervisor=constants.HT_KVM,
+         admin_state=constants.ADMINST_DOWN,
+         admin_state_source=constants.ADMIN_SOURCE,
+         hypervisor=constants.HT_KVM,
          os="busybox",
          primary_node="node6-uuid",
 +        secondary_nodes=[],
          disk_template=constants.DT_DRBD8,
          disks=[],
          disks_active=False,
        objects.Instance(name="inst4", hvparams={}, beparams={},
          uuid="inst4-uuid",
          ctime=1291244390, mtime=1291244395, serial_no=25,
-         admin_state=constants.ADMINST_DOWN, hypervisor=constants.HT_XEN_PVM,
+         admin_state=constants.ADMINST_DOWN,
+         admin_state_source=constants.ADMIN_SOURCE,
+         hypervisor=constants.HT_XEN_PVM,
          os="linux1",
          primary_node="nodeoff2-uuid",
 +        secondary_nodes=[],
          disk_template=constants.DT_DRBD8,
          disks=[],
          disks_active=True,
        objects.Instance(name="inst5", hvparams={}, nics=[],
          uuid="inst5-uuid",
          ctime=1231211000, mtime=1261200000, serial_no=3,
-         admin_state=constants.ADMINST_UP, hypervisor=constants.HT_XEN_HVM,
+         admin_state=constants.ADMINST_UP,
+         admin_state_source=constants.ADMIN_SOURCE,
+         hypervisor=constants.HT_XEN_HVM,
          os="deb99",
          primary_node="nodebad2-uuid",
 +        secondary_nodes=[],
          disk_template=constants.DT_DISKLESS,
          disks=[],
          disks_active=True,
        objects.Instance(name="inst6", hvparams={}, nics=[],
          uuid="inst6-uuid",
          ctime=7513, mtime=11501, serial_no=13390,
-         admin_state=constants.ADMINST_DOWN, hypervisor=constants.HT_XEN_HVM,
+         admin_state=constants.ADMINST_DOWN,
+         admin_state_source=constants.ADMIN_SOURCE,
+         hypervisor=constants.HT_XEN_HVM,
          os="deb99",
          primary_node="node7-uuid",
 +        secondary_nodes=[],
          disk_template=constants.DT_DISKLESS,
          disks=[],
          disks_active=False,
        objects.Instance(name="inst7", hvparams={}, nics=[],
          uuid="inst7-uuid",
          ctime=None, mtime=None, serial_no=1947,
-         admin_state=constants.ADMINST_DOWN, hypervisor=constants.HT_XEN_HVM,
+         admin_state=constants.ADMINST_DOWN,
+         admin_state_source=constants.ADMIN_SOURCE,
+         hypervisor=constants.HT_XEN_HVM,
          os="deb99",
          primary_node="node6-uuid",
 +        secondary_nodes=[],
          disk_template=constants.DT_DISKLESS,
          disks=[],
          disks_active=False,
        objects.Instance(name="inst8", hvparams={}, nics=[],
          uuid="inst8-uuid",
          ctime=None, mtime=None, serial_no=19478,
-         admin_state=constants.ADMINST_OFFLINE, hypervisor=constants.HT_XEN_HVM,
+         admin_state=constants.ADMINST_OFFLINE,
+         admin_state_source=constants.ADMIN_SOURCE,
+         hypervisor=constants.HT_XEN_HVM,
          os="deb99",
          primary_node="node6-uuid",
 +        secondary_nodes=[],
          disk_template=constants.DT_DISKLESS,
          disks=[],
          disks_active=False,
          beparams={},
          osparams={}),
-       objects.Instance(name="inst9", hvparams={}, nics=[],
+       objects.Instance(
+         name="inst9",
+         hvparams={constants.HV_KVM_USER_SHUTDOWN: True},
+         nics=[],
          uuid="inst9-uuid",
          ctime=None, mtime=None, serial_no=19478,
-         admin_state=constants.ADMINST_UP, hypervisor=constants.HT_XEN_HVM,
+         admin_state=constants.ADMINST_UP,
+         admin_state_source=constants.ADMIN_SOURCE,
+         hypervisor=constants.HT_XEN_HVM,
          os="deb99",
          primary_node="node6-uuid",
 +        secondary_nodes=[],
          disk_template=constants.DT_DISKLESS,
          disks=[],
          disks_active=False,
@@@ -254,24 -250,8 +254,26 @@@ def _ConvertDiskAndCheckMissingSpindles
        if not "spindles" in dobj:
          missing_spindles = True
  
 +      if not "uuid" in dobj:
 +        dobj["uuid"] = utils.io.NewUUID()
 +  return missing_spindles
 +
 +
 +def UpgradeInstances(config_data):
 +  """Upgrades the instances' configuration."""
 +
 +  network2uuid = dict((n["name"], n["uuid"])
 +                      for n in config_data["networks"].values())
 +  if "instances" not in config_data:
 +    raise Error("Can't find the 'instances' key in the configuration!")
 +
 +  missing_spindles = False
 +  for instance, iobj in config_data["instances"].items():
 +    _ConvertNicNameToUuid(iobj, network2uuid)
 +    if _ConvertDiskAndCheckMissingSpindles(iobj, instance):
 +      missing_spindles = True
+     if "admin_state_source" not in iobj:
+       iobj["admin_state_source"] = constants.ADMIN_SOURCE
  
    if GetExclusiveStorageValue(config_data) and missing_spindles:
      # We cannot be sure that the instances that are missing spindles have
@@@ -447,12 -404,12 +449,11 @@@ def UpgradeAll(config_data)
  
  # DOWNGRADE ------------------------------------------------------------
  
 -def DowngradeInstances(config_data):
 -  for _, iobj in config_data["instances"].items():
 -    if "admin_state_source" in iobj:
 -      del iobj["admin_state_source"]
 -
 +def DowngradeNodeParams(config_object):
 +  if "ndparams" in config_object:
 +    if "cpu_speed" in config_object["ndparams"]:
 +      del config_object["ndparams"]["cpu_speed"]
  
  def DowngradeCluster(config_data):
    cluster = config_data.get("cluster", None)
    if not cluster: