Add default file_driver if missing
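In short: LUInstanceCreate now falls back to constants.FD_DEFAULT when a
file-based disk template (DT_FILE or DT_SHARED_FILE) is requested without an
explicit file_driver. A minimal stand-alone sketch of that defaulting rule,
using only the constants referenced in the hunk below (the helper name is
hypothetical; the real check is inlined in CheckArguments):

    from ganeti import constants

    def _default_file_driver(disk_template, file_driver):
      # Only file-based disk templates need a file driver; an unset driver
      # falls back to the default defined in constants.FD_DEFAULT.
      if (not file_driver and
          disk_template in (constants.DT_FILE, constants.DT_SHARED_FILE)):
        return constants.FD_DEFAULT
      return file_driver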
diff --git a/lib/cmdlib/instance.py b/lib/cmdlib/instance.py
index 9edf524..e922800 100644
--- a/lib/cmdlib/instance.py
+++ b/lib/cmdlib/instance.py
 
 import OpenSSL
 import copy
-import itertools
 import logging
-import operator
 import os
-import time
 
 from ganeti import compat
 from ganeti import constants
@@ -41,38 +38,34 @@ from ganeti import netutils
 from ganeti import objects
 from ganeti import opcodes
 from ganeti import pathutils
-from ganeti import qlang
 from ganeti import rpc
 from ganeti import utils
-from ganeti import query
-
-from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, _QueryBase, \
-  ResultWithJobs, Tasklet
-
-from ganeti.cmdlib.common import INSTANCE_ONLINE, INSTANCE_DOWN, \
-  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, _CheckNodeOnline, \
-  _ShareAll, _GetDefaultIAllocator, _CheckInstanceNodeGroups, \
-  _LoadNodeEvacResult, _CheckIAllocatorOrNode, _CheckParamsNotGlobal, \
-  _IsExclusiveStorageEnabledNode, _CheckHVParams, _CheckOSParams, \
-  _GetWantedInstances, _CheckInstancesNodeGroups, _AnnotateDiskParams, \
-  _GetUpdatedParams, _ExpandInstanceName, _ComputeIPolicySpecViolation, \
-  _CheckInstanceState, _ExpandNodeName
-from ganeti.cmdlib.instance_storage import _CreateDisks, \
-  _CheckNodesFreeDiskPerVG, _WipeDisks, _WaitForSync, _CheckDiskConsistency, \
-  _IsExclusiveStorageEnabledNodeName, _CreateSingleBlockDev, _ComputeDisks, \
-  _CheckRADOSFreeSpace, _ComputeDiskSizePerVG, _GenerateDiskTemplate, \
-  _CreateBlockDev, _StartInstanceDisks, _ShutdownInstanceDisks, \
-  _AssembleInstanceDisks, _ExpandCheckDisks
-from ganeti.cmdlib.instance_utils import _BuildInstanceHookEnvByObject, \
-  _GetClusterDomainSecret, _BuildInstanceHookEnv, _NICListToTuple, \
-  _NICToTuple, _CheckNodeNotDrained, _RemoveInstance, _CopyLockList, \
-  _ReleaseLocks, _CheckNodeVmCapable, _CheckTargetNodeIPolicy, \
-  _GetInstanceInfoText, _RemoveDisks
+
+from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs
+
+from ganeti.cmdlib.common import INSTANCE_DOWN, \
+  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
+  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
+  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
+  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
+  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceName, \
+  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeName
+from ganeti.cmdlib.instance_storage import CreateDisks, \
+  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
+  IsExclusiveStorageEnabledNodeName, CreateSingleBlockDev, ComputeDisks, \
+  CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
+  StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks
+from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
+  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
+  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
+  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
+  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
+  CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS
 
 import ganeti.masterd.instance
 
 
-#: Type description for changes as returned by L{ApplyContainerMods}'s
+#: Type description for changes as returned by L{_ApplyContainerMods}'s
 #: callbacks
 _TApplyContModsCbChanges = \
   ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
@@ -272,7 +265,7 @@ def _CheckForConflictingIp(lu, ip, node):
 
 def _ComputeIPolicyInstanceSpecViolation(
   ipolicy, instance_spec, disk_template,
-  _compute_fn=_ComputeIPolicySpecViolation):
+  _compute_fn=ComputeIPolicySpecViolation):
   """Compute if instance specs meets the specs of ipolicy.
 
   @type ipolicy: dict
@@ -282,7 +275,7 @@ def _ComputeIPolicyInstanceSpecViolation(
   @type disk_template: string
   @param disk_template: the disk template of the instance
   @param _compute_fn: The function to verify ipolicy (unittest only)
-  @see: L{_ComputeIPolicySpecViolation}
+  @see: L{ComputeIPolicySpecViolation}
 
   """
   mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
@@ -320,80 +313,6 @@ def _CheckOSVariant(os_obj, name):
     raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
 
 
-def _CheckNodeHasOS(lu, node, os_name, force_variant):
-  """Ensure that a node supports a given OS.
-
-  @param lu: the LU on behalf of which we make the check
-  @param node: the node to check
-  @param os_name: the OS to query about
-  @param force_variant: whether to ignore variant errors
-  @raise errors.OpPrereqError: if the node is not supporting the OS
-
-  """
-  result = lu.rpc.call_os_get(node, os_name)
-  result.Raise("OS '%s' not in supported OS list for node %s" %
-               (os_name, node),
-               prereq=True, ecode=errors.ECODE_INVAL)
-  if not force_variant:
-    _CheckOSVariant(result.payload, os_name)
-
-
-def _CheckNicsBridgesExist(lu, target_nics, target_node):
-  """Check that the brigdes needed by a list of nics exist.
-
-  """
-  cluster = lu.cfg.GetClusterInfo()
-  paramslist = [cluster.SimpleFillNIC(nic.nicparams) for nic in target_nics]
-  brlist = [params[constants.NIC_LINK] for params in paramslist
-            if params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED]
-  if brlist:
-    result = lu.rpc.call_bridges_exist(target_node, brlist)
-    result.Raise("Error checking bridges on destination node '%s'" %
-                 target_node, prereq=True, ecode=errors.ECODE_ENVIRON)
-
-
-def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
-  """Checks if a node has enough free memory.
-
-  This function checks if a given node has the needed amount of free
-  memory. In case the node has less memory or we cannot get the
-  information from the node, this function raises an OpPrereqError
-  exception.
-
-  @type lu: C{LogicalUnit}
-  @param lu: a logical unit from which we get configuration data
-  @type node: C{str}
-  @param node: the node to check
-  @type reason: C{str}
-  @param reason: string to use in the error message
-  @type requested: C{int}
-  @param requested: the amount of memory in MiB to check for
-  @type hypervisor_name: C{str}
-  @param hypervisor_name: the hypervisor to ask for memory stats
-  @rtype: integer
-  @return: node current free memory
-  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
-      we cannot check the node
-
-  """
-  nodeinfo = lu.rpc.call_node_info([node], None, [hypervisor_name], False)
-  nodeinfo[node].Raise("Can't get data from node %s" % node,
-                       prereq=True, ecode=errors.ECODE_ENVIRON)
-  (_, _, (hv_info, )) = nodeinfo[node].payload
-
-  free_mem = hv_info.get("memory_free", None)
-  if not isinstance(free_mem, int):
-    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
-                               " was '%s'" % (node, free_mem),
-                               errors.ECODE_ENVIRON)
-  if requested > free_mem:
-    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
-                               " needed %s MiB, available %s MiB" %
-                               (node, reason, requested, free_mem),
-                               errors.ECODE_NORES)
-  return free_mem
-
-
 class LUInstanceCreate(LogicalUnit):
   """Create an instance.
 
@@ -485,13 +404,19 @@ class LUInstanceCreate(LogicalUnit):
       raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                  self.op.file_driver, errors.ECODE_INVAL)
 
+    # set default file_driver if unset and required
+    if (not self.op.file_driver and
+        self.op.disk_template in [constants.DT_FILE,
+                                  constants.DT_SHARED_FILE]):
+      self.op.file_driver = constants.FD_DEFAULT
+
     if self.op.disk_template == constants.DT_FILE:
       opcodes.RequireFileStorage()
     elif self.op.disk_template == constants.DT_SHARED_FILE:
       opcodes.RequireSharedFileStorage()
 
     ### Node/iallocator related checks
-    _CheckIAllocatorOrNode(self, "iallocator", "pnode")
+    CheckIAllocatorOrNode(self, "iallocator", "pnode")
 
     if self.op.pnode is not None:
       if self.op.disk_template in constants.DTS_INT_MIRROR:
@@ -505,7 +430,7 @@ class LUInstanceCreate(LogicalUnit):
 
     _CheckOpportunisticLocking(self.op)
 
-    self._cds = _GetClusterDomainSecret()
+    self._cds = GetClusterDomainSecret()
 
     if self.op.mode == constants.INSTANCE_IMPORT:
       # On import force_variant must be True, because if we forced it at
@@ -599,12 +524,11 @@ class LUInstanceCreate(LogicalUnit):
 
       if self.op.opportunistic_locking:
         self.opportunistic_locks[locking.LEVEL_NODE] = True
-        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
     else:
-      self.op.pnode = _ExpandNodeName(self.cfg, self.op.pnode)
+      self.op.pnode = ExpandNodeName(self.cfg, self.op.pnode)
       nodelist = [self.op.pnode]
       if self.op.snode is not None:
-        self.op.snode = _ExpandNodeName(self.cfg, self.op.snode)
+        self.op.snode = ExpandNodeName(self.cfg, self.op.snode)
         nodelist.append(self.op.snode)
       self.needed_locks[locking.LEVEL_NODE] = nodelist
 
@@ -625,7 +549,7 @@ class LUInstanceCreate(LogicalUnit):
                                      " requires a source node option",
                                      errors.ECODE_INVAL)
       else:
-        self.op.src_node = src_node = _ExpandNodeName(self.cfg, src_node)
+        self.op.src_node = src_node = ExpandNodeName(self.cfg, src_node)
         if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
           self.needed_locks[locking.LEVEL_NODE].append(src_node)
         if not os.path.isabs(src_path):
@@ -633,7 +557,15 @@ class LUInstanceCreate(LogicalUnit):
             utils.PathJoin(pathutils.EXPORT_DIR, src_path)
 
     self.needed_locks[locking.LEVEL_NODE_RES] = \
-      _CopyLockList(self.needed_locks[locking.LEVEL_NODE])
+      CopyLockList(self.needed_locks[locking.LEVEL_NODE])
+
+  def DeclareLocks(self, level):
+    if level == locking.LEVEL_NODE_RES and \
+      self.opportunistic_locks[locking.LEVEL_NODE]:
+      # Even when using opportunistic locking, we require the same set of
+      # NODE_RES locks as we got NODE locks
+      self.needed_locks[locking.LEVEL_NODE_RES] = \
+        self.owned_locks(locking.LEVEL_NODE)
 
   def _RunAllocator(self):
     """Run the allocator based on input opcode.
@@ -690,7 +622,7 @@ class LUInstanceCreate(LogicalUnit):
       env["SRC_PATH"] = self.op.src_path
       env["SRC_IMAGES"] = self.src_images
 
-    env.update(_BuildInstanceHookEnv(
+    env.update(BuildInstanceHookEnv(
       name=self.op.instance_name,
       primary_node=self.op.pnode,
       secondary_nodes=self.secondaries,
@@ -699,10 +631,11 @@ class LUInstanceCreate(LogicalUnit):
       minmem=self.be_full[constants.BE_MINMEM],
       maxmem=self.be_full[constants.BE_MAXMEM],
       vcpus=self.be_full[constants.BE_VCPUS],
-      nics=_NICListToTuple(self, self.nics),
+      nics=NICListToTuple(self, self.nics),
       disk_template=self.op.disk_template,
-      disks=[(d[constants.IDISK_NAME], d[constants.IDISK_SIZE],
-              d[constants.IDISK_MODE]) for d in self.disks],
+      disks=[(d[constants.IDISK_NAME], d.get("uuid", ""),
+              d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
+             for d in self.disks],
       bep=self.be_full,
       hvp=self.hv_full,
       hypervisor_name=self.op.hypervisor,
@@ -749,7 +682,7 @@ class LUInstanceCreate(LogicalUnit):
         raise errors.OpPrereqError("No export found for relative path %s" %
                                    src_path, errors.ECODE_INVAL)
 
-    _CheckNodeOnline(self, src_node)
+    CheckNodeOnline(self, src_node)
     result = self.rpc.call_export_info(src_node, src_path)
     result.Raise("No export or invalid export found in dir %s" % src_path)
 
@@ -951,8 +884,8 @@ class LUInstanceCreate(LogicalUnit):
     hv_type.CheckParameterSyntax(filled_hvp)
     self.hv_full = filled_hvp
     # check that we don't specify global parameters on an instance
-    _CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
-                          "instance", "cluster")
+    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
+                         "instance", "cluster")
 
     # fill and remember the beparams dict
     self.be_full = _ComputeFullBeParams(self.op, cluster)
@@ -971,7 +904,7 @@ class LUInstanceCreate(LogicalUnit):
 
     # disk checks/pre-build
     default_vg = self.cfg.GetVGName()
-    self.disks = _ComputeDisks(self.op, default_vg)
+    self.disks = ComputeDisks(self.op, default_vg)
 
     if self.op.mode == constants.INSTANCE_IMPORT:
       disk_images = []
@@ -1021,9 +954,9 @@ class LUInstanceCreate(LogicalUnit):
 
     # Release all unneeded node locks
     keep_locks = filter(None, [self.op.pnode, self.op.snode, self.op.src_node])
-    _ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
-    _ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
-    _ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
+    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
+    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
+    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
 
     assert (self.owned_locks(locking.LEVEL_NODE) ==
             self.owned_locks(locking.LEVEL_NODE_RES)), \
@@ -1090,9 +1023,9 @@ class LUInstanceCreate(LogicalUnit):
       if self.op.snode == pnode.name:
         raise errors.OpPrereqError("The secondary node cannot be the"
                                    " primary node", errors.ECODE_INVAL)
-      _CheckNodeOnline(self, self.op.snode)
-      _CheckNodeNotDrained(self, self.op.snode)
-      _CheckNodeVmCapable(self, self.op.snode)
+      CheckNodeOnline(self, self.op.snode)
+      CheckNodeNotDrained(self, self.op.snode)
+      CheckNodeVmCapable(self, self.op.snode)
       self.secondaries.append(self.op.snode)
 
       snode = self.cfg.GetNodeInfo(self.op.snode)
@@ -1106,7 +1039,7 @@ class LUInstanceCreate(LogicalUnit):
       nodes = [pnode]
       if self.op.disk_template in constants.DTS_INT_MIRROR:
         nodes.append(snode)
-      has_es = lambda n: _IsExclusiveStorageEnabledNode(self.cfg, n)
+      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
       if compat.any(map(has_es, nodes)):
         raise errors.OpPrereqError("Disk template %s not supported with"
                                    " exclusive storage" % self.op.disk_template,
@@ -1119,14 +1052,14 @@ class LUInstanceCreate(LogicalUnit):
         # _CheckRADOSFreeSpace() is just a placeholder.
         # Any function that checks prerequisites can be placed here.
         # Check if there is enough space on the RADOS cluster.
-        _CheckRADOSFreeSpace()
+        CheckRADOSFreeSpace()
       elif self.op.disk_template == constants.DT_EXT:
         # FIXME: Function that checks prereqs if needed
         pass
       else:
         # Check lv size requirements, if not adopting
-        req_sizes = _ComputeDiskSizePerVG(self.op.disk_template, self.disks)
-        _CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)
+        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
+        CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)
 
     elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
       all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
@@ -1219,13 +1152,13 @@ class LUInstanceCreate(LogicalUnit):
              (pnode.group, group_info.name, utils.CommaJoin(res)))
       raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
 
-    _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
+    CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
 
-    _CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
+    CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
     # check OS parameters (remotely)
-    _CheckOSParams(self, True, nodenames, self.op.os_type, self.os_full)
+    CheckOSParams(self, True, nodenames, self.op.os_type, self.os_full)
 
-    _CheckNicsBridgesExist(self, self.nics, self.pnode.name)
+    CheckNicsBridgesExist(self, self.nics, self.pnode.name)
 
     #TODO: _CheckExtParams (remotely)
     # Check parameters for extstorage
@@ -1233,10 +1166,10 @@ class LUInstanceCreate(LogicalUnit):
     # memory check on primary node
     #TODO(dynmem): use MINMEM for checking
     if self.op.start:
-      _CheckNodeFreeMemory(self, self.pnode.name,
-                           "creating instance %s" % self.op.instance_name,
-                           self.be_full[constants.BE_MAXMEM],
-                           self.op.hypervisor)
+      CheckNodeFreeMemory(self, self.pnode.name,
+                          "creating instance %s" % self.op.instance_name,
+                          self.be_full[constants.BE_MAXMEM],
+                          self.op.hypervisor)
 
     self.dry_run_result = list(nodenames)
 
@@ -1263,21 +1196,22 @@ class LUInstanceCreate(LogicalUnit):
     # has no disks yet (we are generating them right here).
     node = self.cfg.GetNodeInfo(pnode_name)
     nodegroup = self.cfg.GetNodeGroup(node.group)
-    disks = _GenerateDiskTemplate(self,
-                                  self.op.disk_template,
-                                  instance, pnode_name,
-                                  self.secondaries,
-                                  self.disks,
-                                  self.instance_file_storage_dir,
-                                  self.op.file_driver,
-                                  0,
-                                  feedback_fn,
-                                  self.cfg.GetGroupDiskParams(nodegroup))
+    disks = GenerateDiskTemplate(self,
+                                 self.op.disk_template,
+                                 instance, pnode_name,
+                                 self.secondaries,
+                                 self.disks,
+                                 self.instance_file_storage_dir,
+                                 self.op.file_driver,
+                                 0,
+                                 feedback_fn,
+                                 self.cfg.GetGroupDiskParams(nodegroup))
 
     iobj = objects.Instance(name=instance, os=self.op.os_type,
                             primary_node=pnode_name,
                             nics=self.nics, disks=disks,
                             disk_template=self.op.disk_template,
+                            disks_active=False,
                             admin_state=constants.ADMINST_DOWN,
                             network_port=network_port,
                             beparams=self.op.beparams,
@@ -1306,7 +1240,7 @@ class LUInstanceCreate(LogicalUnit):
     else:
       feedback_fn("* creating instance disks...")
       try:
-        _CreateDisks(self, iobj)
+        CreateDisks(self, iobj)
       except errors.OpExecError:
         self.LogWarning("Device creation failed")
         self.cfg.ReleaseDRBDMinors(instance)
@@ -1322,16 +1256,16 @@ class LUInstanceCreate(LogicalUnit):
 
     if self.op.mode == constants.INSTANCE_IMPORT:
       # Release unused nodes
-      _ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node])
+      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node])
     else:
       # Release all nodes
-      _ReleaseLocks(self, locking.LEVEL_NODE)
+      ReleaseLocks(self, locking.LEVEL_NODE)
 
     disk_abort = False
     if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
       feedback_fn("* wiping instance disks...")
       try:
-        _WipeDisks(self, iobj)
+        WipeDisks(self, iobj)
       except errors.OpExecError, err:
         logging.exception("Wiping disks failed")
         self.LogWarning("Wiping instance disks failed (%s)", err)
@@ -1341,24 +1275,27 @@ class LUInstanceCreate(LogicalUnit):
       # Something is already wrong with the disks, don't do anything else
       pass
     elif self.op.wait_for_sync:
-      disk_abort = not _WaitForSync(self, iobj)
+      disk_abort = not WaitForSync(self, iobj)
     elif iobj.disk_template in constants.DTS_INT_MIRROR:
       # make sure the disks are not degraded (still sync-ing is ok)
       feedback_fn("* checking mirrors status")
-      disk_abort = not _WaitForSync(self, iobj, oneshot=True)
+      disk_abort = not WaitForSync(self, iobj, oneshot=True)
     else:
       disk_abort = False
 
     if disk_abort:
-      _RemoveDisks(self, iobj)
+      RemoveDisks(self, iobj)
       self.cfg.RemoveInstance(iobj.name)
       # Make sure the instance lock gets removed
       self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
       raise errors.OpExecError("There are some degraded disks for"
                                " this instance")
 
+    # instance disks are now active
+    iobj.disks_active = True
+
     # Release all node resource locks
-    _ReleaseLocks(self, locking.LEVEL_NODE_RES)
+    ReleaseLocks(self, locking.LEVEL_NODE_RES)
 
     if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
       # we need to set the disks ID to the primary node, since the
@@ -1501,7 +1438,7 @@ class LUInstanceRename(LogicalUnit):
     This runs on master, primary and secondary nodes of the instance.
 
     """
-    env = _BuildInstanceHookEnvByObject(self, self.instance)
+    env = BuildInstanceHookEnvByObject(self, self.instance)
     env["INSTANCE_NEW_NAME"] = self.op.new_name
     return env
 
@@ -1518,13 +1455,13 @@ class LUInstanceRename(LogicalUnit):
     This checks that the instance is in the cluster and is not running.
 
     """
-    self.op.instance_name = _ExpandInstanceName(self.cfg,
-                                                self.op.instance_name)
+    self.op.instance_name = ExpandInstanceName(self.cfg,
+                                               self.op.instance_name)
     instance = self.cfg.GetInstanceInfo(self.op.instance_name)
     assert instance is not None
-    _CheckNodeOnline(self, instance.primary_node)
-    _CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
-                        msg="cannot rename")
+    CheckNodeOnline(self, instance.primary_node)
+    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
+                       msg="cannot rename")
     self.instance = instance
 
     new_name = self.op.new_name
@@ -1576,9 +1513,9 @@ class LUInstanceRename(LogicalUnit):
                    (inst.primary_node, old_file_storage_dir,
                     new_file_storage_dir))
 
-    _StartInstanceDisks(self, inst, None)
+    StartInstanceDisks(self, inst, None)
     # update info on disks
-    info = _GetInstanceInfoText(inst)
+    info = GetInstanceInfoText(inst)
     for (idx, disk) in enumerate(inst.disks):
       for node in inst.all_nodes:
         self.cfg.SetDiskID(disk, node)
@@ -1596,7 +1533,7 @@ class LUInstanceRename(LogicalUnit):
                (inst.name, inst.primary_node, msg))
         self.LogWarning(msg)
     finally:
-      _ShutdownInstanceDisks(self, inst)
+      ShutdownInstanceDisks(self, inst)
 
     return inst.name
 
@@ -1621,7 +1558,7 @@ class LUInstanceRemove(LogicalUnit):
     elif level == locking.LEVEL_NODE_RES:
       # Copy node locks
       self.needed_locks[locking.LEVEL_NODE_RES] = \
-        _CopyLockList(self.needed_locks[locking.LEVEL_NODE])
+        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
 
   def BuildHooksEnv(self):
     """Build hooks env.
@@ -1629,7 +1566,7 @@ class LUInstanceRemove(LogicalUnit):
     This runs on master, primary and secondary nodes of the instance.
 
     """
-    env = _BuildInstanceHookEnvByObject(self, self.instance)
+    env = BuildInstanceHookEnvByObject(self, self.instance)
     env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
     return env
 
@@ -1677,16 +1614,7 @@ class LUInstanceRemove(LogicalUnit):
                 self.owned_locks(locking.LEVEL_NODE)), \
       "Not owning correct locks"
 
-    _RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)
-
-
-def _CheckInstanceBridgesExist(lu, instance, node=None):
-  """Check that the brigdes needed by an instance exist.
-
-  """
-  if node is None:
-    node = instance.primary_node
-  _CheckNicsBridgesExist(lu, instance.nics, node)
+    RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)
 
 
 class LUInstanceMove(LogicalUnit):
@@ -1699,7 +1627,7 @@ class LUInstanceMove(LogicalUnit):
 
   def ExpandNames(self):
     self._ExpandAndLockInstance()
-    target_node = _ExpandNodeName(self.cfg, self.op.target_node)
+    target_node = ExpandNodeName(self.cfg, self.op.target_node)
     self.op.target_node = target_node
     self.needed_locks[locking.LEVEL_NODE] = [target_node]
     self.needed_locks[locking.LEVEL_NODE_RES] = []
@@ -1711,7 +1639,7 @@ class LUInstanceMove(LogicalUnit):
     elif level == locking.LEVEL_NODE_RES:
       # Copy node locks
       self.needed_locks[locking.LEVEL_NODE_RES] = \
-        _CopyLockList(self.needed_locks[locking.LEVEL_NODE])
+        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
 
   def BuildHooksEnv(self):
     """Build hooks env.
@@ -1723,7 +1651,7 @@ class LUInstanceMove(LogicalUnit):
       "TARGET_NODE": self.op.target_node,
       "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
       }
-    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
+    env.update(BuildInstanceHookEnvByObject(self, self.instance))
     return env
 
   def BuildHooksNodes(self):
@@ -1769,27 +1697,27 @@ class LUInstanceMove(LogicalUnit):
         raise errors.OpPrereqError("Instance disk %d has a complex layout,"
                                    " cannot copy" % idx, errors.ECODE_STATE)
 
-    _CheckNodeOnline(self, target_node)
-    _CheckNodeNotDrained(self, target_node)
-    _CheckNodeVmCapable(self, target_node)
+    CheckNodeOnline(self, target_node)
+    CheckNodeNotDrained(self, target_node)
+    CheckNodeVmCapable(self, target_node)
     cluster = self.cfg.GetClusterInfo()
     group_info = self.cfg.GetNodeGroup(node.group)
     ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
-    _CheckTargetNodeIPolicy(self, ipolicy, instance, node, self.cfg,
-                            ignore=self.op.ignore_ipolicy)
+    CheckTargetNodeIPolicy(self, ipolicy, instance, node, self.cfg,
+                           ignore=self.op.ignore_ipolicy)
 
     if instance.admin_state == constants.ADMINST_UP:
       # check memory requirements on the secondary node
-      _CheckNodeFreeMemory(self, target_node,
-                           "failing over instance %s" %
-                           instance.name, bep[constants.BE_MAXMEM],
-                           instance.hypervisor)
+      CheckNodeFreeMemory(self, target_node,
+                          "failing over instance %s" %
+                          instance.name, bep[constants.BE_MAXMEM],
+                          instance.hypervisor)
     else:
       self.LogInfo("Not checking memory on the secondary node as"
                    " instance will not be started")
 
     # check bridge existance
-    _CheckInstanceBridgesExist(self, instance, node=target_node)
+    CheckInstanceBridgesExist(self, instance, node=target_node)
 
   def Exec(self, feedback_fn):
     """Move an instance.
@@ -1826,7 +1754,7 @@ class LUInstanceMove(LogicalUnit):
 
     # create the target disks
     try:
-      _CreateDisks(self, instance, target_node=target_node)
+      CreateDisks(self, instance, target_node=target_node)
     except errors.OpExecError:
       self.LogWarning("Device creation failed")
       self.cfg.ReleaseDRBDMinors(instance.name)
@@ -1858,7 +1786,7 @@ class LUInstanceMove(LogicalUnit):
     if errs:
       self.LogWarning("Some disks failed to copy, aborting")
       try:
-        _RemoveDisks(self, instance, target_node=target_node)
+        RemoveDisks(self, instance, target_node=target_node)
       finally:
         self.cfg.ReleaseDRBDMinors(instance.name)
         raise errors.OpExecError("Errors during disk copy: %s" %
@@ -1868,17 +1796,17 @@ class LUInstanceMove(LogicalUnit):
     self.cfg.Update(instance, feedback_fn)
 
     self.LogInfo("Removing the disks on the original node")
-    _RemoveDisks(self, instance, target_node=source_node)
+    RemoveDisks(self, instance, target_node=source_node)
 
     # Only start the instance if it's marked as up
     if instance.admin_state == constants.ADMINST_UP:
       self.LogInfo("Starting instance %s on node %s",
                    instance.name, target_node)
 
-      disks_ok, _ = _AssembleInstanceDisks(self, instance,
-                                           ignore_secondaries=True)
+      disks_ok, _ = AssembleInstanceDisks(self, instance,
+                                          ignore_secondaries=True)
       if not disks_ok:
-        _ShutdownInstanceDisks(self, instance)
+        ShutdownInstanceDisks(self, instance)
         raise errors.OpExecError("Can't activate the instance's disks")
 
       result = self.rpc.call_instance_start(target_node,
@@ -1886,1264 +1814,246 @@ class LUInstanceMove(LogicalUnit):
                                             self.op.reason)
       msg = result.fail_msg
       if msg:
-        _ShutdownInstanceDisks(self, instance)
+        ShutdownInstanceDisks(self, instance)
         raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                  (instance.name, target_node, msg))
 
 
-def _GetInstanceConsole(cluster, instance):
-  """Returns console information for an instance.
-
-  @type cluster: L{objects.Cluster}
-  @type instance: L{objects.Instance}
-  @rtype: dict
-
-  """
-  hyper = hypervisor.GetHypervisorClass(instance.hypervisor)
-  # beparams and hvparams are passed separately, to avoid editing the
-  # instance and then saving the defaults in the instance itself.
-  hvparams = cluster.FillHV(instance)
-  beparams = cluster.FillBE(instance)
-  console = hyper.GetInstanceConsole(instance, hvparams, beparams)
-
-  assert console.instance == instance.name
-  assert console.Validate()
-
-  return console.ToDict()
-
-
-class _InstanceQuery(_QueryBase):
-  FIELDS = query.INSTANCE_FIELDS
-
-  def ExpandNames(self, lu):
-    lu.needed_locks = {}
-    lu.share_locks = _ShareAll()
-
-    if self.names:
-      self.wanted = _GetWantedInstances(lu, self.names)
-    else:
-      self.wanted = locking.ALL_SET
-
-    self.do_locking = (self.use_locking and
-                       query.IQ_LIVE in self.requested_data)
-    if self.do_locking:
-      lu.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
-      lu.needed_locks[locking.LEVEL_NODEGROUP] = []
-      lu.needed_locks[locking.LEVEL_NODE] = []
-      lu.needed_locks[locking.LEVEL_NETWORK] = []
-      lu.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
-
-    self.do_grouplocks = (self.do_locking and
-                          query.IQ_NODES in self.requested_data)
-
-  def DeclareLocks(self, lu, level):
-    if self.do_locking:
-      if level == locking.LEVEL_NODEGROUP and self.do_grouplocks:
-        assert not lu.needed_locks[locking.LEVEL_NODEGROUP]
-
-        # Lock all groups used by instances optimistically; this requires going
-        # via the node before it's locked, requiring verification later on
-        lu.needed_locks[locking.LEVEL_NODEGROUP] = \
-          set(group_uuid
-              for instance_name in lu.owned_locks(locking.LEVEL_INSTANCE)
-              for group_uuid in lu.cfg.GetInstanceNodeGroups(instance_name))
-      elif level == locking.LEVEL_NODE:
-        lu._LockInstancesNodes() # pylint: disable=W0212
-
-      elif level == locking.LEVEL_NETWORK:
-        lu.needed_locks[locking.LEVEL_NETWORK] = \
-          frozenset(net_uuid
-                    for instance_name in lu.owned_locks(locking.LEVEL_INSTANCE)
-                    for net_uuid in lu.cfg.GetInstanceNetworks(instance_name))
-
-  @staticmethod
-  def _CheckGroupLocks(lu):
-    owned_instances = frozenset(lu.owned_locks(locking.LEVEL_INSTANCE))
-    owned_groups = frozenset(lu.owned_locks(locking.LEVEL_NODEGROUP))
-
-    # Check if node groups for locked instances are still correct
-    for instance_name in owned_instances:
-      _CheckInstanceNodeGroups(lu.cfg, instance_name, owned_groups)
-
-  def _GetQueryData(self, lu):
-    """Computes the list of instances and their attributes.
-
-    """
-    if self.do_grouplocks:
-      self._CheckGroupLocks(lu)
-
-    cluster = lu.cfg.GetClusterInfo()
-    all_info = lu.cfg.GetAllInstancesInfo()
-
-    instance_names = self._GetNames(lu, all_info.keys(), locking.LEVEL_INSTANCE)
-
-    instance_list = [all_info[name] for name in instance_names]
-    nodes = frozenset(itertools.chain(*(inst.all_nodes
-                                        for inst in instance_list)))
-    hv_list = list(set([inst.hypervisor for inst in instance_list]))
-    bad_nodes = []
-    offline_nodes = []
-    wrongnode_inst = set()
-
-    # Gather data as requested
-    if self.requested_data & set([query.IQ_LIVE, query.IQ_CONSOLE]):
-      live_data = {}
-      node_data = lu.rpc.call_all_instances_info(nodes, hv_list)
-      for name in nodes:
-        result = node_data[name]
-        if result.offline:
-          # offline nodes will be in both lists
-          assert result.fail_msg
-          offline_nodes.append(name)
-        if result.fail_msg:
-          bad_nodes.append(name)
-        elif result.payload:
-          for inst in result.payload:
-            if inst in all_info:
-              if all_info[inst].primary_node == name:
-                live_data.update(result.payload)
-              else:
-                wrongnode_inst.add(inst)
-            else:
-              # orphan instance; we don't list it here as we don't
-              # handle this case yet in the output of instance listing
-              logging.warning("Orphan instance '%s' found on node %s",
-                              inst, name)
-              # else no instance is alive
-    else:
-      live_data = {}
-
-    if query.IQ_DISKUSAGE in self.requested_data:
-      gmi = ganeti.masterd.instance
-      disk_usage = dict((inst.name,
-                         gmi.ComputeDiskSize(inst.disk_template,
-                                             [{constants.IDISK_SIZE: disk.size}
-                                              for disk in inst.disks]))
-                        for inst in instance_list)
-    else:
-      disk_usage = None
-
-    if query.IQ_CONSOLE in self.requested_data:
-      consinfo = {}
-      for inst in instance_list:
-        if inst.name in live_data:
-          # Instance is running
-          consinfo[inst.name] = _GetInstanceConsole(cluster, inst)
-        else:
-          consinfo[inst.name] = None
-      assert set(consinfo.keys()) == set(instance_names)
-    else:
-      consinfo = None
-
-    if query.IQ_NODES in self.requested_data:
-      node_names = set(itertools.chain(*map(operator.attrgetter("all_nodes"),
-                                            instance_list)))
-      nodes = dict(lu.cfg.GetMultiNodeInfo(node_names))
-      groups = dict((uuid, lu.cfg.GetNodeGroup(uuid))
-                    for uuid in set(map(operator.attrgetter("group"),
-                                        nodes.values())))
-    else:
-      nodes = None
-      groups = None
-
-    if query.IQ_NETWORKS in self.requested_data:
-      net_uuids = itertools.chain(*(lu.cfg.GetInstanceNetworks(i.name)
-                                    for i in instance_list))
-      networks = dict((uuid, lu.cfg.GetNetwork(uuid)) for uuid in net_uuids)
-    else:
-      networks = None
-
-    return query.InstanceQueryData(instance_list, lu.cfg.GetClusterInfo(),
-                                   disk_usage, offline_nodes, bad_nodes,
-                                   live_data, wrongnode_inst, consinfo,
-                                   nodes, groups, networks)
-
-
-class LUInstanceQuery(NoHooksLU):
-  """Logical unit for querying instances.
+class LUInstanceMultiAlloc(NoHooksLU):
+  """Allocates multiple instances at the same time.
 
   """
-  # pylint: disable=W0142
   REQ_BGL = False
 
   def CheckArguments(self):
-    self.iq = _InstanceQuery(qlang.MakeSimpleFilter("name", self.op.names),
-                             self.op.output_fields, self.op.use_locking)
-
-  def ExpandNames(self):
-    self.iq.ExpandNames(self)
+    """Check arguments.
 
-  def DeclareLocks(self, level):
-    self.iq.DeclareLocks(self, level)
+    """
+    nodes = []
+    for inst in self.op.instances:
+      if inst.iallocator is not None:
+        raise errors.OpPrereqError("iallocator are not allowed to be set on"
+                                   " instance objects", errors.ECODE_INVAL)
+      nodes.append(bool(inst.pnode))
+      if inst.disk_template in constants.DTS_INT_MIRROR:
+        nodes.append(bool(inst.snode))
 
-  def Exec(self, feedback_fn):
-    return self.iq.OldStyleQuery(self)
+    has_nodes = compat.any(nodes)
+    if compat.all(nodes) ^ has_nodes:
+      raise errors.OpPrereqError("There are instance objects providing"
+                                 " pnode/snode while others do not",
+                                 errors.ECODE_INVAL)
 
+    if not has_nodes and self.op.iallocator is None:
+      default_iallocator = self.cfg.GetDefaultIAllocator()
+      if default_iallocator:
+        self.op.iallocator = default_iallocator
+      else:
+        raise errors.OpPrereqError("No iallocator or nodes on the instances"
+                                   " given and no cluster-wide default"
+                                   " iallocator found; please specify either"
+                                   " an iallocator or nodes on the instances"
+                                   " or set a cluster-wide default iallocator",
+                                   errors.ECODE_INVAL)
 
-class LUInstanceQueryData(NoHooksLU):
-  """Query runtime instance data.
+    _CheckOpportunisticLocking(self.op)
 
-  """
-  REQ_BGL = False
+    dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
+    if dups:
+      raise errors.OpPrereqError("There are duplicate instance names: %s" %
+                                 utils.CommaJoin(dups), errors.ECODE_INVAL)
 
   def ExpandNames(self):
-    self.needed_locks = {}
-
-    # Use locking if requested or when non-static information is wanted
-    if not (self.op.static or self.op.use_locking):
-      self.LogWarning("Non-static data requested, locks need to be acquired")
-      self.op.use_locking = True
+    """Calculate the locks.
 
-    if self.op.instances or not self.op.use_locking:
-      # Expand instance names right here
-      self.wanted_names = _GetWantedInstances(self, self.op.instances)
-    else:
-      # Will use acquired locks
-      self.wanted_names = None
+    """
+    self.share_locks = ShareAll()
+    self.needed_locks = {
+      # iallocator will select nodes and even if no iallocator is used,
+      # collisions with LUInstanceCreate should be avoided
+      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
+      }
 
-    if self.op.use_locking:
-      self.share_locks = _ShareAll()
+    if self.op.iallocator:
+      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
+      self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
 
-      if self.wanted_names is None:
-        self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
-      else:
-        self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
+      if self.op.opportunistic_locking:
+        self.opportunistic_locks[locking.LEVEL_NODE] = True
+    else:
+      nodeslist = []
+      for inst in self.op.instances:
+        inst.pnode = ExpandNodeName(self.cfg, inst.pnode)
+        nodeslist.append(inst.pnode)
+        if inst.snode is not None:
+          inst.snode = ExpandNodeName(self.cfg, inst.snode)
+          nodeslist.append(inst.snode)
 
-      self.needed_locks[locking.LEVEL_NODEGROUP] = []
-      self.needed_locks[locking.LEVEL_NODE] = []
-      self.needed_locks[locking.LEVEL_NETWORK] = []
-      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
+      self.needed_locks[locking.LEVEL_NODE] = nodeslist
+      # Lock resources of instance's primary and secondary nodes (copy to
+      # prevent accidental modification)
+      self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)
 
   def DeclareLocks(self, level):
-    if self.op.use_locking:
-      owned_instances = self.owned_locks(locking.LEVEL_INSTANCE)
-      if level == locking.LEVEL_NODEGROUP:
-
-        # Lock all groups used by instances optimistically; this requires going
-        # via the node before it's locked, requiring verification later on
-        self.needed_locks[locking.LEVEL_NODEGROUP] = \
-          frozenset(group_uuid
-                    for instance_name in owned_instances
-                    for group_uuid in
-                    self.cfg.GetInstanceNodeGroups(instance_name))
-
-      elif level == locking.LEVEL_NODE:
-        self._LockInstancesNodes()
-
-      elif level == locking.LEVEL_NETWORK:
-        self.needed_locks[locking.LEVEL_NETWORK] = \
-          frozenset(net_uuid
-                    for instance_name in owned_instances
-                    for net_uuid in
-                    self.cfg.GetInstanceNetworks(instance_name))
+    if level == locking.LEVEL_NODE_RES and \
+      self.opportunistic_locks[locking.LEVEL_NODE]:
+      # Even when using opportunistic locking, we require the same set of
+      # NODE_RES locks as we got NODE locks
+      self.needed_locks[locking.LEVEL_NODE_RES] = \
+        self.owned_locks(locking.LEVEL_NODE)
 
   def CheckPrereq(self):
-    """Check prerequisites.
-
-    This only checks the optional instance list against the existing names.
-
-    """
-    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
-    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
-    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
-    owned_networks = frozenset(self.owned_locks(locking.LEVEL_NETWORK))
-
-    if self.wanted_names is None:
-      assert self.op.use_locking, "Locking was not used"
-      self.wanted_names = owned_instances
-
-    instances = dict(self.cfg.GetMultiInstanceInfo(self.wanted_names))
-
-    if self.op.use_locking:
-      _CheckInstancesNodeGroups(self.cfg, instances, owned_groups, owned_nodes,
-                                None)
-    else:
-      assert not (owned_instances or owned_groups or
-                  owned_nodes or owned_networks)
-
-    self.wanted_instances = instances.values()
-
-  def _ComputeBlockdevStatus(self, node, instance, dev):
-    """Returns the status of a block device
+    """Check prerequisite.
 
     """
-    if self.op.static or not node:
-      return None
-
-    self.cfg.SetDiskID(dev, node)
-
-    result = self.rpc.call_blockdev_find(node, dev)
-    if result.offline:
-      return None
+    if self.op.iallocator:
+      cluster = self.cfg.GetClusterInfo()
+      default_vg = self.cfg.GetVGName()
+      ec_id = self.proc.GetECId()
 
-    result.Raise("Can't compute disk status for %s" % instance.name)
+      if self.op.opportunistic_locking:
+        # Only consider nodes for which a lock is held
+        node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
+      else:
+        node_whitelist = None
 
-    status = result.payload
-    if status is None:
-      return None
+      insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
+                                           _ComputeNics(op, cluster, None,
+                                                        self.cfg, ec_id),
+                                           _ComputeFullBeParams(op, cluster),
+                                           node_whitelist)
+               for op in self.op.instances]
 
-    return (status.dev_path, status.major, status.minor,
-            status.sync_percent, status.estimated_time,
-            status.is_degraded, status.ldisk_status)
+      req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
+      ial = iallocator.IAllocator(self.cfg, self.rpc, req)
 
-  def _ComputeDiskStatus(self, instance, snode, dev):
-    """Compute block device status.
+      ial.Run(self.op.iallocator)
 
-    """
-    (anno_dev,) = _AnnotateDiskParams(instance, [dev], self.cfg)
+      if not ial.success:
+        raise errors.OpPrereqError("Can't compute nodes using"
+                                   " iallocator '%s': %s" %
+                                   (self.op.iallocator, ial.info),
+                                   errors.ECODE_NORES)
 
-    return self._ComputeDiskStatusInner(instance, snode, anno_dev)
+      self.ia_result = ial.result
 
-  def _ComputeDiskStatusInner(self, instance, snode, dev):
-    """Compute block device status.
+    if self.op.dry_run:
+      self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
+        constants.JOB_IDS_KEY: [],
+        })
 
-    @attention: The device has to be annotated already.
+  def _ConstructPartialResult(self):
+    """Contructs the partial result.
 
     """
-    if dev.dev_type in constants.LDS_DRBD:
-      # we change the snode then (otherwise we use the one passed in)
-      if dev.logical_id[0] == instance.primary_node:
-        snode = dev.logical_id[1]
-      else:
-        snode = dev.logical_id[0]
-
-    dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
-                                              instance, dev)
-    dev_sstatus = self._ComputeBlockdevStatus(snode, instance, dev)
-
-    if dev.children:
-      dev_children = map(compat.partial(self._ComputeDiskStatusInner,
-                                        instance, snode),
-                         dev.children)
+    if self.op.iallocator:
+      (allocatable, failed_insts) = self.ia_result
+      allocatable_insts = map(compat.fst, allocatable)
     else:
-      dev_children = []
+      allocatable_insts = [op.instance_name for op in self.op.instances]
+      failed_insts = []
 
     return {
-      "iv_name": dev.iv_name,
-      "dev_type": dev.dev_type,
-      "logical_id": dev.logical_id,
-      "physical_id": dev.physical_id,
-      "pstatus": dev_pstatus,
-      "sstatus": dev_sstatus,
-      "children": dev_children,
-      "mode": dev.mode,
-      "size": dev.size,
-      "name": dev.name,
-      "uuid": dev.uuid,
+      opcodes.OpInstanceMultiAlloc.ALLOCATABLE_KEY: allocatable_insts,
+      opcodes.OpInstanceMultiAlloc.FAILED_KEY: failed_insts,
       }
 
   def Exec(self, feedback_fn):
-    """Gather and return data"""
-    result = {}
-
-    cluster = self.cfg.GetClusterInfo()
-
-    node_names = itertools.chain(*(i.all_nodes for i in self.wanted_instances))
-    nodes = dict(self.cfg.GetMultiNodeInfo(node_names))
+    """Executes the opcode.
 
-    groups = dict(self.cfg.GetMultiNodeGroupInfo(node.group
-                                                 for node in nodes.values()))
+    """
+    jobs = []
+    if self.op.iallocator:
+      op2inst = dict((op.instance_name, op) for op in self.op.instances)
+      (allocatable, failed) = self.ia_result
 
-    group2name_fn = lambda uuid: groups[uuid].name
-    for instance in self.wanted_instances:
-      pnode = nodes[instance.primary_node]
+      for (name, nodes) in allocatable:
+        op = op2inst.pop(name)
 
-      if self.op.static or pnode.offline:
-        remote_state = None
-        if pnode.offline:
-          self.LogWarning("Primary node %s is marked offline, returning static"
-                          " information only for instance %s" %
-                          (pnode.name, instance.name))
-      else:
-        remote_info = self.rpc.call_instance_info(instance.primary_node,
-                                                  instance.name,
-                                                  instance.hypervisor)
-        remote_info.Raise("Error checking node %s" % instance.primary_node)
-        remote_info = remote_info.payload
-        if remote_info and "state" in remote_info:
-          remote_state = "up"
+        if len(nodes) > 1:
+          (op.pnode, op.snode) = nodes
         else:
-          if instance.admin_state == constants.ADMINST_UP:
-            remote_state = "down"
-          else:
-            remote_state = instance.admin_state
-
-      disks = map(compat.partial(self._ComputeDiskStatus, instance, None),
-                  instance.disks)
-
-      snodes_group_uuids = [nodes[snode_name].group
-                            for snode_name in instance.secondary_nodes]
-
-      result[instance.name] = {
-        "name": instance.name,
-        "config_state": instance.admin_state,
-        "run_state": remote_state,
-        "pnode": instance.primary_node,
-        "pnode_group_uuid": pnode.group,
-        "pnode_group_name": group2name_fn(pnode.group),
-        "snodes": instance.secondary_nodes,
-        "snodes_group_uuids": snodes_group_uuids,
-        "snodes_group_names": map(group2name_fn, snodes_group_uuids),
-        "os": instance.os,
-        # this happens to be the same format used for hooks
-        "nics": _NICListToTuple(self, instance.nics),
-        "disk_template": instance.disk_template,
-        "disks": disks,
-        "hypervisor": instance.hypervisor,
-        "network_port": instance.network_port,
-        "hv_instance": instance.hvparams,
-        "hv_actual": cluster.FillHV(instance, skip_globals=True),
-        "be_instance": instance.beparams,
-        "be_actual": cluster.FillBE(instance),
-        "os_instance": instance.osparams,
-        "os_actual": cluster.SimpleFillOS(instance.os, instance.osparams),
-        "serial_no": instance.serial_no,
-        "mtime": instance.mtime,
-        "ctime": instance.ctime,
-        "uuid": instance.uuid,
-        }
-
-    return result
+          (op.pnode,) = nodes
 
+        jobs.append([op])
 
-class LUInstanceStartup(LogicalUnit):
-  """Starts an instance.
+      missing = set(op2inst.keys()) - set(failed)
+      assert not missing, \
+        "Iallocator did return incomplete result: %s" % \
+        utils.CommaJoin(missing)
+    else:
+      jobs.extend([op] for op in self.op.instances)
 
-  """
-  HPATH = "instance-start"
-  HTYPE = constants.HTYPE_INSTANCE
-  REQ_BGL = False
+    return ResultWithJobs(jobs, **self._ConstructPartialResult())
 
-  def CheckArguments(self):
-    # extra beparams
-    if self.op.beparams:
-      # fill the beparams dict
-      objects.UpgradeBeParams(self.op.beparams)
-      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
 
-  def ExpandNames(self):
-    self._ExpandAndLockInstance()
-    self.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE
+class _InstNicModPrivate:
+  """Data structure for network interface modifications.
 
-  def DeclareLocks(self, level):
-    if level == locking.LEVEL_NODE_RES:
-      self._LockInstancesNodes(primary_only=True, level=locking.LEVEL_NODE_RES)
+  Used by L{LUInstanceSetParams}.
 
-  def BuildHooksEnv(self):
-    """Build hooks env.
+  """
+  def __init__(self):
+    self.params = None
+    self.filled = None
 
-    This runs on master, primary and secondary nodes of the instance.
 
-    """
-    env = {
-      "FORCE": self.op.force,
-      }
+def _PrepareContainerMods(mods, private_fn):
+  """Prepares a list of container modifications by adding a private data field.
 
-    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
+  @type mods: list of tuples; (operation, index, parameters)
+  @param mods: List of modifications
+  @type private_fn: callable or None
+  @param private_fn: Callable for constructing a private data field for a
+    modification
+  @rtype: list
 
-    return env
+  """
+  if private_fn is None:
+    fn = lambda: None
+  else:
+    fn = private_fn
 
-  def BuildHooksNodes(self):
-    """Build hooks nodes.
+  return [(op, idx, params, fn()) for (op, idx, params) in mods]
 
-    """
-    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
-    return (nl, nl)
 
-  def CheckPrereq(self):
-    """Check prerequisites.
+def _CheckNodesPhysicalCPUs(lu, nodenames, requested, hypervisor_name):
+  """Checks if nodes have enough physical CPUs
 
-    This checks that the instance is in the cluster.
+  This function checks if all given nodes have the needed number of
+  physical CPUs. In case any node has less CPUs or we cannot get the
+  information from the node, this function raises an OpPrereqError
+  exception.
 
-    """
-    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
-    assert self.instance is not None, \
-      "Cannot retrieve locked instance %s" % self.op.instance_name
+  @type lu: C{LogicalUnit}
+  @param lu: a logical unit from which we get configuration data
+  @type nodenames: C{list}
+  @param nodenames: the list of node names to check
+  @type requested: C{int}
+  @param requested: the minimum acceptable number of physical CPUs
+  @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
+      or we cannot check the node
 
-    # extra hvparams
-    if self.op.hvparams:
-      # check hypervisor parameter syntax (locally)
-      cluster = self.cfg.GetClusterInfo()
-      utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
-      filled_hvp = cluster.FillHV(instance)
-      filled_hvp.update(self.op.hvparams)
-      hv_type = hypervisor.GetHypervisorClass(instance.hypervisor)
-      hv_type.CheckParameterSyntax(filled_hvp)
-      _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
+  """
+  nodeinfo = lu.rpc.call_node_info(nodenames, None, [hypervisor_name], None)
+  for node in nodenames:
+    info = nodeinfo[node]
+    info.Raise("Cannot get current information from node %s" % node,
+               prereq=True, ecode=errors.ECODE_ENVIRON)
+    (_, _, (hv_info, )) = info.payload
+    num_cpus = hv_info.get("cpu_total", None)
+    if not isinstance(num_cpus, int):
+      raise errors.OpPrereqError("Can't compute the number of physical CPUs"
+                                 " on node %s, result was '%s'" %
+                                 (node, num_cpus), errors.ECODE_ENVIRON)
+    if requested > num_cpus:
+      raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
+                                 "required" % (node, num_cpus, requested),
+                                 errors.ECODE_NORES)
 
-    _CheckInstanceState(self, instance, INSTANCE_ONLINE)
 
-    self.primary_offline = self.cfg.GetNodeInfo(instance.primary_node).offline
-
-    if self.primary_offline and self.op.ignore_offline_nodes:
-      self.LogWarning("Ignoring offline primary node")
-
-      if self.op.hvparams or self.op.beparams:
-        self.LogWarning("Overridden parameters are ignored")
-    else:
-      _CheckNodeOnline(self, instance.primary_node)
-
-      bep = self.cfg.GetClusterInfo().FillBE(instance)
-      bep.update(self.op.beparams)
-
-      # check bridges existence
-      _CheckInstanceBridgesExist(self, instance)
-
-      remote_info = self.rpc.call_instance_info(instance.primary_node,
-                                                instance.name,
-                                                instance.hypervisor)
-      remote_info.Raise("Error checking node %s" % instance.primary_node,
-                        prereq=True, ecode=errors.ECODE_ENVIRON)
-      if not remote_info.payload: # not running already
-        _CheckNodeFreeMemory(self, instance.primary_node,
-                             "starting instance %s" % instance.name,
-                             bep[constants.BE_MINMEM], instance.hypervisor)
-
-  def Exec(self, feedback_fn):
-    """Start the instance.
-
-    """
-    instance = self.instance
-    force = self.op.force
-    reason = self.op.reason
-
-    if not self.op.no_remember:
-      self.cfg.MarkInstanceUp(instance.name)
-
-    if self.primary_offline:
-      assert self.op.ignore_offline_nodes
-      self.LogInfo("Primary node offline, marked instance as started")
-    else:
-      node_current = instance.primary_node
-
-      _StartInstanceDisks(self, instance, force)
-
-      result = \
-        self.rpc.call_instance_start(node_current,
-                                     (instance, self.op.hvparams,
-                                      self.op.beparams),
-                                     self.op.startup_paused, reason)
-      msg = result.fail_msg
-      if msg:
-        _ShutdownInstanceDisks(self, instance)
-        raise errors.OpExecError("Could not start instance: %s" % msg)
-
-
-class LUInstanceShutdown(LogicalUnit):
-  """Shutdown an instance.
-
-  """
-  HPATH = "instance-stop"
-  HTYPE = constants.HTYPE_INSTANCE
-  REQ_BGL = False
-
-  def ExpandNames(self):
-    self._ExpandAndLockInstance()
-
-  def BuildHooksEnv(self):
-    """Build hooks env.
-
-    This runs on master, primary and secondary nodes of the instance.
-
-    """
-    env = _BuildInstanceHookEnvByObject(self, self.instance)
-    env["TIMEOUT"] = self.op.timeout
-    return env
-
-  def BuildHooksNodes(self):
-    """Build hooks nodes.
-
-    """
-    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
-    return (nl, nl)
-
-  def CheckPrereq(self):
-    """Check prerequisites.
-
-    This checks that the instance is in the cluster.
-
-    """
-    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
-    assert self.instance is not None, \
-      "Cannot retrieve locked instance %s" % self.op.instance_name
-
-    if not self.op.force:
-      _CheckInstanceState(self, self.instance, INSTANCE_ONLINE)
-    else:
-      self.LogWarning("Ignoring offline instance check")
-
-    self.primary_offline = \
-      self.cfg.GetNodeInfo(self.instance.primary_node).offline
-
-    if self.primary_offline and self.op.ignore_offline_nodes:
-      self.LogWarning("Ignoring offline primary node")
-    else:
-      _CheckNodeOnline(self, self.instance.primary_node)
-
-  def Exec(self, feedback_fn):
-    """Shutdown the instance.
-
-    """
-    instance = self.instance
-    node_current = instance.primary_node
-    timeout = self.op.timeout
-    reason = self.op.reason
-
-    # If the instance is offline we shouldn't mark it as down, as that
-    # resets the offline flag.
-    if not self.op.no_remember and instance.admin_state in INSTANCE_ONLINE:
-      self.cfg.MarkInstanceDown(instance.name)
-
-    if self.primary_offline:
-      assert self.op.ignore_offline_nodes
-      self.LogInfo("Primary node offline, marked instance as stopped")
-    else:
-      result = self.rpc.call_instance_shutdown(node_current, instance, timeout,
-                                               reason)
-      msg = result.fail_msg
-      if msg:
-        self.LogWarning("Could not shutdown instance: %s", msg)
-
-      _ShutdownInstanceDisks(self, instance)
-
-
-class LUInstanceReinstall(LogicalUnit):
-  """Reinstall an instance.
-
-  """
-  HPATH = "instance-reinstall"
-  HTYPE = constants.HTYPE_INSTANCE
-  REQ_BGL = False
-
-  def ExpandNames(self):
-    self._ExpandAndLockInstance()
-
-  def BuildHooksEnv(self):
-    """Build hooks env.
-
-    This runs on master, primary and secondary nodes of the instance.
-
-    """
-    return _BuildInstanceHookEnvByObject(self, self.instance)
-
-  def BuildHooksNodes(self):
-    """Build hooks nodes.
-
-    """
-    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
-    return (nl, nl)
-
-  def CheckPrereq(self):
-    """Check prerequisites.
-
-    This checks that the instance is in the cluster and is not running.
-
-    """
-    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
-    assert instance is not None, \
-      "Cannot retrieve locked instance %s" % self.op.instance_name
-    _CheckNodeOnline(self, instance.primary_node, "Instance primary node"
-                     " offline, cannot reinstall")
-
-    if instance.disk_template == constants.DT_DISKLESS:
-      raise errors.OpPrereqError("Instance '%s' has no disks" %
-                                 self.op.instance_name,
-                                 errors.ECODE_INVAL)
-    _CheckInstanceState(self, instance, INSTANCE_DOWN, msg="cannot reinstall")
-
-    if self.op.os_type is not None:
-      # OS verification
-      pnode = _ExpandNodeName(self.cfg, instance.primary_node)
-      _CheckNodeHasOS(self, pnode, self.op.os_type, self.op.force_variant)
-      instance_os = self.op.os_type
-    else:
-      instance_os = instance.os
-
-    nodelist = list(instance.all_nodes)
-
-    if self.op.osparams:
-      i_osdict = _GetUpdatedParams(instance.osparams, self.op.osparams)
-      _CheckOSParams(self, True, nodelist, instance_os, i_osdict)
-      self.os_inst = i_osdict # the new dict (without defaults)
-    else:
-      self.os_inst = None
-
-    self.instance = instance
-
-  def Exec(self, feedback_fn):
-    """Reinstall the instance.
-
-    """
-    inst = self.instance
-
-    if self.op.os_type is not None:
-      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
-      inst.os = self.op.os_type
-      # Write to configuration
-      self.cfg.Update(inst, feedback_fn)
-
-    _StartInstanceDisks(self, inst, None)
-    try:
-      feedback_fn("Running the instance OS create scripts...")
-      # FIXME: pass debug option from opcode to backend
-      result = self.rpc.call_instance_os_add(inst.primary_node,
-                                             (inst, self.os_inst), True,
-                                             self.op.debug_level)
-      result.Raise("Could not install OS for instance %s on node %s" %
-                   (inst.name, inst.primary_node))
-    finally:
-      _ShutdownInstanceDisks(self, inst)
-
-
-class LUInstanceReboot(LogicalUnit):
-  """Reboot an instance.
-
-  """
-  HPATH = "instance-reboot"
-  HTYPE = constants.HTYPE_INSTANCE
-  REQ_BGL = False
-
-  def ExpandNames(self):
-    self._ExpandAndLockInstance()
-
-  def BuildHooksEnv(self):
-    """Build hooks env.
-
-    This runs on master, primary and secondary nodes of the instance.
-
-    """
-    env = {
-      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
-      "REBOOT_TYPE": self.op.reboot_type,
-      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
-      }
-
-    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
-
-    return env
-
-  def BuildHooksNodes(self):
-    """Build hooks nodes.
-
-    """
-    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
-    return (nl, nl)
-
-  def CheckPrereq(self):
-    """Check prerequisites.
-
-    This checks that the instance is in the cluster.
-
-    """
-    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
-    assert self.instance is not None, \
-      "Cannot retrieve locked instance %s" % self.op.instance_name
-    _CheckInstanceState(self, instance, INSTANCE_ONLINE)
-    _CheckNodeOnline(self, instance.primary_node)
-
-    # check bridges existence
-    _CheckInstanceBridgesExist(self, instance)
-
-  def Exec(self, feedback_fn):
-    """Reboot the instance.
-
-    """
-    instance = self.instance
-    ignore_secondaries = self.op.ignore_secondaries
-    reboot_type = self.op.reboot_type
-    reason = self.op.reason
-
-    remote_info = self.rpc.call_instance_info(instance.primary_node,
-                                              instance.name,
-                                              instance.hypervisor)
-    remote_info.Raise("Error checking node %s" % instance.primary_node)
-    instance_running = bool(remote_info.payload)
-
-    node_current = instance.primary_node
-
-    if instance_running and reboot_type in [constants.INSTANCE_REBOOT_SOFT,
-                                            constants.INSTANCE_REBOOT_HARD]:
-      for disk in instance.disks:
-        self.cfg.SetDiskID(disk, node_current)
-      result = self.rpc.call_instance_reboot(node_current, instance,
-                                             reboot_type,
-                                             self.op.shutdown_timeout, reason)
-      result.Raise("Could not reboot instance")
-    else:
-      if instance_running:
-        result = self.rpc.call_instance_shutdown(node_current, instance,
-                                                 self.op.shutdown_timeout,
-                                                 reason)
-        result.Raise("Could not shutdown instance for full reboot")
-        _ShutdownInstanceDisks(self, instance)
-      else:
-        self.LogInfo("Instance %s was already stopped, starting now",
-                     instance.name)
-      _StartInstanceDisks(self, instance, ignore_secondaries)
-      result = self.rpc.call_instance_start(node_current,
-                                            (instance, None, None), False,
-                                            reason)
-      msg = result.fail_msg
-      if msg:
-        _ShutdownInstanceDisks(self, instance)
-        raise errors.OpExecError("Could not start instance for"
-                                 " full reboot: %s" % msg)
-
-    self.cfg.MarkInstanceUp(instance.name)
-
-
-class LUInstanceConsole(NoHooksLU):
-  """Connect to an instance's console.
-
-  This is somewhat special in that it returns the command line that
-  you need to run on the master node in order to connect to the
-  console.
-
-  """
-  REQ_BGL = False
-
-  def ExpandNames(self):
-    self.share_locks = _ShareAll()
-    self._ExpandAndLockInstance()
-
-  def CheckPrereq(self):
-    """Check prerequisites.
-
-    This checks that the instance is in the cluster.
-
-    """
-    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
-    assert self.instance is not None, \
-      "Cannot retrieve locked instance %s" % self.op.instance_name
-    _CheckNodeOnline(self, self.instance.primary_node)
-
-  def Exec(self, feedback_fn):
-    """Connect to the console of an instance
-
-    """
-    instance = self.instance
-    node = instance.primary_node
-
-    node_insts = self.rpc.call_instance_list([node],
-                                             [instance.hypervisor])[node]
-    node_insts.Raise("Can't get node information from %s" % node)
-
-    if instance.name not in node_insts.payload:
-      if instance.admin_state == constants.ADMINST_UP:
-        state = constants.INSTST_ERRORDOWN
-      elif instance.admin_state == constants.ADMINST_DOWN:
-        state = constants.INSTST_ADMINDOWN
-      else:
-        state = constants.INSTST_ADMINOFFLINE
-      raise errors.OpExecError("Instance %s is not running (state %s)" %
-                               (instance.name, state))
-
-    logging.debug("Connecting to console of %s on %s", instance.name, node)
-
-    return _GetInstanceConsole(self.cfg.GetClusterInfo(), instance)
-
-
-def _DeclareLocksForMigration(lu, level):
-  """Declares locks for L{TLMigrateInstance}.
-
-  @type lu: L{LogicalUnit}
-  @param level: Lock level
-
-  """
-  if level == locking.LEVEL_NODE_ALLOC:
-    assert lu.op.instance_name in lu.owned_locks(locking.LEVEL_INSTANCE)
-
-    instance = lu.cfg.GetInstanceInfo(lu.op.instance_name)
-
-    # Node locks are already declared here rather than at LEVEL_NODE as we need
-    # the instance object anyway to declare the node allocation lock.
-    if instance.disk_template in constants.DTS_EXT_MIRROR:
-      if lu.op.target_node is None:
-        lu.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
-        lu.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
-      else:
-        lu.needed_locks[locking.LEVEL_NODE] = [instance.primary_node,
-                                               lu.op.target_node]
-      del lu.recalculate_locks[locking.LEVEL_NODE]
-    else:
-      lu._LockInstancesNodes() # pylint: disable=W0212
-
-  elif level == locking.LEVEL_NODE:
-    # Node locks are declared together with the node allocation lock
-    assert (lu.needed_locks[locking.LEVEL_NODE] or
-            lu.needed_locks[locking.LEVEL_NODE] is locking.ALL_SET)
-
-  elif level == locking.LEVEL_NODE_RES:
-    # Copy node locks
-    lu.needed_locks[locking.LEVEL_NODE_RES] = \
-      _CopyLockList(lu.needed_locks[locking.LEVEL_NODE])
-
-
-def _ExpandNamesForMigration(lu):
-  """Expands names for use with L{TLMigrateInstance}.
-
-  @type lu: L{LogicalUnit}
-
-  """
-  if lu.op.target_node is not None:
-    lu.op.target_node = _ExpandNodeName(lu.cfg, lu.op.target_node)
-
-  lu.needed_locks[locking.LEVEL_NODE] = []
-  lu.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
-
-  lu.needed_locks[locking.LEVEL_NODE_RES] = []
-  lu.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE
-
-  # The node allocation lock is actually only needed for externally replicated
-  # instances (e.g. sharedfile or RBD) and if an iallocator is used.
-  lu.needed_locks[locking.LEVEL_NODE_ALLOC] = []
-
-
-class LUInstanceFailover(LogicalUnit):
-  """Failover an instance.
-
-  """
-  HPATH = "instance-failover"
-  HTYPE = constants.HTYPE_INSTANCE
-  REQ_BGL = False
-
-  def CheckArguments(self):
-    """Check the arguments.
-
-    """
-    self.iallocator = getattr(self.op, "iallocator", None)
-    self.target_node = getattr(self.op, "target_node", None)
-
-  def ExpandNames(self):
-    self._ExpandAndLockInstance()
-    _ExpandNamesForMigration(self)
-
-    self._migrater = \
-      TLMigrateInstance(self, self.op.instance_name, False, True, False,
-                        self.op.ignore_consistency, True,
-                        self.op.shutdown_timeout, self.op.ignore_ipolicy)
-
-    self.tasklets = [self._migrater]
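For readability, a hedged sketch of the same tasklet construction with keyword arguments, mapping the positional values above onto the parameter names of TLMigrateInstance.__init__ as defined later in this file (the keyword form is illustrative, not what the code uses):

    self._migrater = \
      TLMigrateInstance(self, self.op.instance_name,
                        cleanup=False,
                        failover=True,
                        fallback=False,
                        ignore_consistency=self.op.ignore_consistency,
                        allow_runtime_changes=True,
                        shutdown_timeout=self.op.shutdown_timeout,
                        ignore_ipolicy=self.op.ignore_ipolicy)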
-
-  def DeclareLocks(self, level):
-    _DeclareLocksForMigration(self, level)
-
-  def BuildHooksEnv(self):
-    """Build hooks env.
-
-    This runs on master, primary and secondary nodes of the instance.
-
-    """
-    instance = self._migrater.instance
-    source_node = instance.primary_node
-    target_node = self.op.target_node
-    env = {
-      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
-      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
-      "OLD_PRIMARY": source_node,
-      "NEW_PRIMARY": target_node,
-      }
-
-    if instance.disk_template in constants.DTS_INT_MIRROR:
-      env["OLD_SECONDARY"] = instance.secondary_nodes[0]
-      env["NEW_SECONDARY"] = source_node
-    else:
-      env["OLD_SECONDARY"] = env["NEW_SECONDARY"] = ""
-
-    env.update(_BuildInstanceHookEnvByObject(self, instance))
-
-    return env
-
-  def BuildHooksNodes(self):
-    """Build hooks nodes.
-
-    """
-    instance = self._migrater.instance
-    nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
-    return (nl, nl + [instance.primary_node])
-
-
-class LUInstanceMigrate(LogicalUnit):
-  """Migrate an instance.
-
-  This is migration without shutting down, compared to the failover,
-  which is done with shutdown.
-
-  """
-  HPATH = "instance-migrate"
-  HTYPE = constants.HTYPE_INSTANCE
-  REQ_BGL = False
-
-  def ExpandNames(self):
-    self._ExpandAndLockInstance()
-    _ExpandNamesForMigration(self)
-
-    self._migrater = \
-      TLMigrateInstance(self, self.op.instance_name, self.op.cleanup,
-                        False, self.op.allow_failover, False,
-                        self.op.allow_runtime_changes,
-                        constants.DEFAULT_SHUTDOWN_TIMEOUT,
-                        self.op.ignore_ipolicy)
-
-    self.tasklets = [self._migrater]
-
-  def DeclareLocks(self, level):
-    _DeclareLocksForMigration(self, level)
-
-  def BuildHooksEnv(self):
-    """Build hooks env.
-
-    This runs on master, primary and secondary nodes of the instance.
-
-    """
-    instance = self._migrater.instance
-    source_node = instance.primary_node
-    target_node = self.op.target_node
-    env = _BuildInstanceHookEnvByObject(self, instance)
-    env.update({
-      "MIGRATE_LIVE": self._migrater.live,
-      "MIGRATE_CLEANUP": self.op.cleanup,
-      "OLD_PRIMARY": source_node,
-      "NEW_PRIMARY": target_node,
-      "ALLOW_RUNTIME_CHANGES": self.op.allow_runtime_changes,
-      })
-
-    if instance.disk_template in constants.DTS_INT_MIRROR:
-      env["OLD_SECONDARY"] = target_node
-      env["NEW_SECONDARY"] = source_node
-    else:
-      env["OLD_SECONDARY"] = env["NEW_SECONDARY"] = None
-
-    return env
-
-  def BuildHooksNodes(self):
-    """Build hooks nodes.
-
-    """
-    instance = self._migrater.instance
-    snodes = list(instance.secondary_nodes)
-    nl = [self.cfg.GetMasterNode(), instance.primary_node] + snodes
-    return (nl, nl)
-
-
-class LUInstanceMultiAlloc(NoHooksLU):
-  """Allocates multiple instances at the same time.
-
-  """
-  REQ_BGL = False
-
-  def CheckArguments(self):
-    """Check arguments.
-
-    """
-    nodes = []
-    for inst in self.op.instances:
-      if inst.iallocator is not None:
-        raise errors.OpPrereqError("iallocator are not allowed to be set on"
-                                   " instance objects", errors.ECODE_INVAL)
-      nodes.append(bool(inst.pnode))
-      if inst.disk_template in constants.DTS_INT_MIRROR:
-        nodes.append(bool(inst.snode))
-
-    has_nodes = compat.any(nodes)
-    if compat.all(nodes) ^ has_nodes:
-      raise errors.OpPrereqError("There are instance objects providing"
-                                 " pnode/snode while others do not",
-                                 errors.ECODE_INVAL)
-
-    if self.op.iallocator is None:
-      default_iallocator = self.cfg.GetDefaultIAllocator()
-      if default_iallocator and has_nodes:
-        self.op.iallocator = default_iallocator
-      else:
-        raise errors.OpPrereqError("No iallocator or nodes on the instances"
-                                   " given and no cluster-wide default"
-                                   " iallocator found; please specify either"
-                                   " an iallocator or nodes on the instances"
-                                   " or set a cluster-wide default iallocator",
-                                   errors.ECODE_INVAL)
-
-    _CheckOpportunisticLocking(self.op)
-
-    dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
-    if dups:
-      raise errors.OpPrereqError("There are duplicate instance names: %s" %
-                                 utils.CommaJoin(dups), errors.ECODE_INVAL)
-
-  def ExpandNames(self):
-    """Calculate the locks.
-
-    """
-    self.share_locks = _ShareAll()
-    self.needed_locks = {
-      # iallocator will select nodes and even if no iallocator is used,
-      # collisions with LUInstanceCreate should be avoided
-      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
-      }
-
-    if self.op.iallocator:
-      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
-      self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
-
-      if self.op.opportunistic_locking:
-        self.opportunistic_locks[locking.LEVEL_NODE] = True
-        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
-    else:
-      nodeslist = []
-      for inst in self.op.instances:
-        inst.pnode = _ExpandNodeName(self.cfg, inst.pnode)
-        nodeslist.append(inst.pnode)
-        if inst.snode is not None:
-          inst.snode = _ExpandNodeName(self.cfg, inst.snode)
-          nodeslist.append(inst.snode)
-
-      self.needed_locks[locking.LEVEL_NODE] = nodeslist
-      # Lock resources of instance's primary and secondary nodes (copy to
-      # prevent accidental modification)
-      self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)
-
-  def CheckPrereq(self):
-    """Check prerequisite.
-
-    """
-    cluster = self.cfg.GetClusterInfo()
-    default_vg = self.cfg.GetVGName()
-    ec_id = self.proc.GetECId()
-
-    if self.op.opportunistic_locking:
-      # Only consider nodes for which a lock is held
-      node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
-    else:
-      node_whitelist = None
-
-    insts = [_CreateInstanceAllocRequest(op, _ComputeDisks(op, default_vg),
-                                         _ComputeNics(op, cluster, None,
-                                                      self.cfg, ec_id),
-                                         _ComputeFullBeParams(op, cluster),
-                                         node_whitelist)
-             for op in self.op.instances]
-
-    req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
-    ial = iallocator.IAllocator(self.cfg, self.rpc, req)
-
-    ial.Run(self.op.iallocator)
-
-    if not ial.success:
-      raise errors.OpPrereqError("Can't compute nodes using"
-                                 " iallocator '%s': %s" %
-                                 (self.op.iallocator, ial.info),
-                                 errors.ECODE_NORES)
-
-    self.ia_result = ial.result
-
-    if self.op.dry_run:
-      self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
-        constants.JOB_IDS_KEY: [],
-        })
-
-  def _ConstructPartialResult(self):
-    """Contructs the partial result.
-
-    """
-    (allocatable, failed) = self.ia_result
-    return {
-      opcodes.OpInstanceMultiAlloc.ALLOCATABLE_KEY:
-        map(compat.fst, allocatable),
-      opcodes.OpInstanceMultiAlloc.FAILED_KEY: failed,
-      }
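Illustrative only: a hedged sketch of the shape this partial result takes, using hypothetical instance and node names; ia_result is the (allocatable, failed) pair produced by the iallocator in CheckPrereq.

def _ExamplePartialResult():
  # Hypothetical iallocator outcome: two placeable instances, one failure.
  allocatable = [("inst1.example.com", ["node1"]),
                 ("inst2.example.com", ["node1", "node2"])]
  failed = ["inst3.example.com"]
  return {
    opcodes.OpInstanceMultiAlloc.ALLOCATABLE_KEY:
      [name for (name, _) in allocatable],
    opcodes.OpInstanceMultiAlloc.FAILED_KEY: failed,
    }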
-
-  def Exec(self, feedback_fn):
-    """Executes the opcode.
-
-    """
-    op2inst = dict((op.instance_name, op) for op in self.op.instances)
-    (allocatable, failed) = self.ia_result
-
-    jobs = []
-    for (name, nodes) in allocatable:
-      op = op2inst.pop(name)
-
-      if len(nodes) > 1:
-        (op.pnode, op.snode) = nodes
-      else:
-        (op.pnode,) = nodes
-
-      jobs.append([op])
-
-    missing = set(op2inst.keys()) - set(failed)
-    assert not missing, \
-      "Iallocator did return incomplete result: %s" % utils.CommaJoin(missing)
-
-    return ResultWithJobs(jobs, **self._ConstructPartialResult())
-
-
-class _InstNicModPrivate:
-  """Data structure for network interface modifications.
-
-  Used by L{LUInstanceSetParams}.
-
-  """
-  def __init__(self):
-    self.params = None
-    self.filled = None
-
-
-def PrepareContainerMods(mods, private_fn):
-  """Prepares a list of container modifications by adding a private data field.
-
-  @type mods: list of tuples; (operation, index, parameters)
-  @param mods: List of modifications
-  @type private_fn: callable or None
-  @param private_fn: Callable for constructing a private data field for a
-    modification
-  @rtype: list
-
-  """
-  if private_fn is None:
-    fn = lambda: None
-  else:
-    fn = private_fn
-
-  return [(op, idx, params, fn()) for (op, idx, params) in mods]
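Illustrative only: a hedged sketch of the input and output of this helper, using a hypothetical NIC addition; the private object comes from _InstNicModPrivate, as in the NIC path of LUInstanceSetParams.

  # Opcode-level modifications: (operation, index, parameters) tuples.
  example_mods = [(constants.DDM_ADD, -1, {constants.INIC_IP: "192.0.2.10"}),
                  (constants.DDM_REMOVE, 0, {})]
  # Each tuple gains a freshly constructed private data object:
  prepared = PrepareContainerMods(example_mods, _InstNicModPrivate)
  # prepared == [(DDM_ADD, -1, {...}, <_InstNicModPrivate>),
  #              (DDM_REMOVE, 0, {}, <_InstNicModPrivate>)]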
-
-
-def _CheckNodesPhysicalCPUs(lu, nodenames, requested, hypervisor_name):
-  """Checks if nodes have enough physical CPUs
-
-  This function checks if all given nodes have the needed number of
-  physical CPUs. In case any node has less CPUs or we cannot get the
-  information from the node, this function raises an OpPrereqError
-  exception.
-
-  @type lu: C{LogicalUnit}
-  @param lu: a logical unit from which we get configuration data
-  @type nodenames: C{list}
-  @param nodenames: the list of node names to check
-  @type requested: C{int}
-  @param requested: the minimum acceptable number of physical CPUs
-  @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
-      or we cannot check the node
-
-  """
-  nodeinfo = lu.rpc.call_node_info(nodenames, None, [hypervisor_name], None)
-  for node in nodenames:
-    info = nodeinfo[node]
-    info.Raise("Cannot get current information from node %s" % node,
-               prereq=True, ecode=errors.ECODE_ENVIRON)
-    (_, _, (hv_info, )) = info.payload
-    num_cpus = hv_info.get("cpu_total", None)
-    if not isinstance(num_cpus, int):
-      raise errors.OpPrereqError("Can't compute the number of physical CPUs"
-                                 " on node %s, result was '%s'" %
-                                 (node, num_cpus), errors.ECODE_ENVIRON)
-    if requested > num_cpus:
-      raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
-                                 "required" % (node, num_cpus, requested),
-                                 errors.ECODE_NORES)
-
-
-def GetItemFromContainer(identifier, kind, container):
-  """Return the item refered by the identifier.
+def GetItemFromContainer(identifier, kind, container):
+  """Return the item refered by the identifier.
 
   @type identifier: string
   @param identifier: Item index or name or UUID
@@ -3178,8 +2088,8 @@ def GetItemFromContainer(identifier, kind, container):
                              (kind, identifier), errors.ECODE_NOENT)
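Illustrative only: a hedged sketch of how this lookup is typically used; the (absolute index, item) return shape is an assumption (the function body is elided in this hunk), and "eth-frontend" is a hypothetical NIC name.

def _ExampleNicLookup(instance):
  # Look up the same container entry by string index and by name/UUID;
  # each call is assumed to return an (absolute index, item) pair.
  by_index = GetItemFromContainer("0", "NIC", instance.nics)
  by_name = GetItemFromContainer("eth-frontend", "NIC", instance.nics)
  return (by_index, by_name)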
 
 
-def ApplyContainerMods(kind, container, chgdesc, mods,
-                       create_fn, modify_fn, remove_fn):
+def _ApplyContainerMods(kind, container, chgdesc, mods,
+                        create_fn, modify_fn, remove_fn):
   """Applies descriptions in C{mods} to C{container}.
 
   @type kind: string
@@ -3189,20 +2099,20 @@ def ApplyContainerMods(kind, container, chgdesc, mods,
   @type chgdesc: None or list
   @param chgdesc: List of applied changes
   @type mods: list
-  @param mods: Modifications as returned by L{PrepareContainerMods}
+  @param mods: Modifications as returned by L{_PrepareContainerMods}
   @type create_fn: callable
   @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
     receives absolute item index, parameters and private data object as added
-    by L{PrepareContainerMods}, returns tuple containing new item and changes
+    by L{_PrepareContainerMods}, returns tuple containing new item and changes
     as list
   @type modify_fn: callable
   @param modify_fn: Callback for modifying an existing item
     (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
-    and private data object as added by L{PrepareContainerMods}, returns
+    and private data object as added by L{_PrepareContainerMods}, returns
     changes as list
   @type remove_fn: callable
   @param remove_fn: Callback on removing item; receives absolute item index,
-    item and private data object as added by L{PrepareContainerMods}
+    item and private data object as added by L{_PrepareContainerMods}
 
   """
   for (op, identifier, params, private) in mods:
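Illustrative only: hypothetical callbacks that satisfy the contracts described above, applied to a plain list instead of real NIC or disk objects; the _Example* names are not part of the module, and _PrepareContainerMods is the renamed helper defined above as PrepareContainerMods.

def _ExampleCreate(idx, params, private):
  # create_fn: return the new item plus a list of change descriptions
  return (dict(params), [("example/%d" % idx, "add")])

def _ExampleModify(idx, item, params, private):
  # modify_fn: update the item in place, return change descriptions
  item.update(params)
  return [("example/%d" % idx, str(params))]

def _ExampleApply():
  container = []
  changes = []
  mods = _PrepareContainerMods([(constants.DDM_ADD, -1, {"name": "demo"})],
                               None)
  _ApplyContainerMods("example", container, changes, mods,
                      _ExampleCreate, _ExampleModify, None)
  return (container, changes)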
@@ -3426,13 +2336,13 @@ class LUInstanceSetParams(LogicalUnit):
   def CheckArguments(self):
     if not (self.op.nics or self.op.disks or self.op.disk_template or
             self.op.hvparams or self.op.beparams or self.op.os_name or
-            self.op.offline is not None or self.op.runtime_mem or
-            self.op.pnode):
+            self.op.osparams or self.op.offline is not None or
+            self.op.runtime_mem or self.op.pnode):
       raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
 
     if self.op.hvparams:
-      _CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
-                            "hypervisor", "instance", "cluster")
+      CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
+                           "hypervisor", "instance", "cluster")
 
     self.op.disks = self._UpgradeDiskNicMods(
       "disk", self.op.disks, opcodes.OpInstanceSetParams.TestDiskModifications)
@@ -3456,7 +2366,7 @@ class LUInstanceSetParams(LogicalUnit):
                     self._VerifyNicModification)
 
     if self.op.pnode:
-      self.op.pnode = _ExpandNodeName(self.cfg, self.op.pnode)
+      self.op.pnode = ExpandNodeName(self.cfg, self.op.pnode)
 
   def ExpandNames(self):
     self._ExpandAndLockInstance()
@@ -3479,12 +2389,12 @@ class LUInstanceSetParams(LogicalUnit):
     elif level == locking.LEVEL_NODE:
       self._LockInstancesNodes()
       if self.op.disk_template and self.op.remote_node:
-        self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
+        self.op.remote_node = ExpandNodeName(self.cfg, self.op.remote_node)
         self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node)
     elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
       # Copy node locks
       self.needed_locks[locking.LEVEL_NODE_RES] = \
-        _CopyLockList(self.needed_locks[locking.LEVEL_NODE])
+        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
 
   def BuildHooksEnv(self):
     """Build hooks env.
@@ -3509,11 +2419,11 @@ class LUInstanceSetParams(LogicalUnit):
         n = copy.deepcopy(nic)
         nicparams = self.cluster.SimpleFillNIC(n.nicparams)
         n.nicparams = nicparams
-        nics.append(_NICToTuple(self, n))
+        nics.append(NICToTuple(self, n))
 
       args["nics"] = nics
 
-    env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
+    env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
     if self.op.disk_template:
       env["NEW_DISK_TEMPLATE"] = self.op.disk_template
     if self.op.runtime_mem:
@@ -3555,7 +2465,7 @@ class LUInstanceSetParams(LogicalUnit):
                                    new_net_obj.name, errors.ECODE_INVAL)
       new_params = dict(netparams)
     else:
-      new_params = _GetUpdatedParams(old_params, update_params_dict)
+      new_params = GetUpdatedParams(old_params, update_params_dict)
 
     utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)
 
@@ -3684,29 +2594,29 @@ class LUInstanceSetParams(LogicalUnit):
                                  " %s to %s" % (instance.disk_template,
                                                 self.op.disk_template),
                                  errors.ECODE_INVAL)
-    _CheckInstanceState(self, instance, INSTANCE_DOWN,
-                        msg="cannot change disk template")
+    CheckInstanceState(self, instance, INSTANCE_DOWN,
+                       msg="cannot change disk template")
     if self.op.disk_template in constants.DTS_INT_MIRROR:
       if self.op.remote_node == pnode:
         raise errors.OpPrereqError("Given new secondary node %s is the same"
                                    " as the primary node of the instance" %
                                    self.op.remote_node, errors.ECODE_STATE)
-      _CheckNodeOnline(self, self.op.remote_node)
-      _CheckNodeNotDrained(self, self.op.remote_node)
+      CheckNodeOnline(self, self.op.remote_node)
+      CheckNodeNotDrained(self, self.op.remote_node)
       # FIXME: here we assume that the old instance type is DT_PLAIN
       assert instance.disk_template == constants.DT_PLAIN
       disks = [{constants.IDISK_SIZE: d.size,
                 constants.IDISK_VG: d.logical_id[0]}
                for d in instance.disks]
-      required = _ComputeDiskSizePerVG(self.op.disk_template, disks)
-      _CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)
+      required = ComputeDiskSizePerVG(self.op.disk_template, disks)
+      CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)
 
       snode_info = self.cfg.GetNodeInfo(self.op.remote_node)
       snode_group = self.cfg.GetNodeGroup(snode_info.group)
       ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                               snode_group)
-      _CheckTargetNodeIPolicy(self, ipolicy, instance, snode_info, self.cfg,
-                              ignore=self.op.ignore_ipolicy)
+      CheckTargetNodeIPolicy(self, ipolicy, instance, snode_info, self.cfg,
+                             ignore=self.op.ignore_ipolicy)
       if pnode_info.group != snode_info.group:
         self.LogWarning("The primary and secondary nodes are in two"
                         " different node groups; the disk parameters"
@@ -3719,7 +2629,7 @@ class LUInstanceSetParams(LogicalUnit):
       if self.op.disk_template in constants.DTS_INT_MIRROR:
         assert snode_info
         nodes.append(snode_info)
-      has_es = lambda n: _IsExclusiveStorageEnabledNode(self.cfg, n)
+      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
       if compat.any(map(has_es, nodes)):
         errmsg = ("Cannot convert disk template from %s to %s when exclusive"
                   " storage is enabled" % (instance.disk_template,
@@ -3777,8 +2687,8 @@ class LUInstanceSetParams(LogicalUnit):
                       self._VerifyDiskModification)
 
     # Prepare disk/NIC modifications
-    self.diskmod = PrepareContainerMods(self.op.disks, None)
-    self.nicmod = PrepareContainerMods(self.op.nics, _InstNicModPrivate)
+    self.diskmod = _PrepareContainerMods(self.op.disks, None)
+    self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)
 
     # Check the validity of the `provider' parameter
     if instance.disk_template in constants.DT_EXT:
@@ -3809,8 +2719,8 @@ class LUInstanceSetParams(LogicalUnit):
 
     # OS change
     if self.op.os_name and not self.op.force:
-      _CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
-                      self.op.force_variant)
+      CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
+                     self.op.force_variant)
       instance_os = self.op.os_name
     else:
       instance_os = instance.os
@@ -3824,13 +2734,13 @@ class LUInstanceSetParams(LogicalUnit):
     # hvparams processing
     if self.op.hvparams:
       hv_type = instance.hypervisor
-      i_hvdict = _GetUpdatedParams(instance.hvparams, self.op.hvparams)
+      i_hvdict = GetUpdatedParams(instance.hvparams, self.op.hvparams)
       utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
       hv_new = cluster.SimpleFillHV(hv_type, instance.os, i_hvdict)
 
       # local check
       hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
-      _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
+      CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
       self.hv_proposed = self.hv_new = hv_new # the new actual values
       self.hv_inst = i_hvdict # the new dict (without defaults)
     else:
@@ -3840,8 +2750,8 @@ class LUInstanceSetParams(LogicalUnit):
 
     # beparams processing
     if self.op.beparams:
-      i_bedict = _GetUpdatedParams(instance.beparams, self.op.beparams,
-                                   use_none=True)
+      i_bedict = GetUpdatedParams(instance.beparams, self.op.beparams,
+                                  use_none=True)
       objects.UpgradeBeParams(i_bedict)
       utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
       be_new = cluster.SimpleFillBE(i_bedict)
@@ -3881,8 +2791,8 @@ class LUInstanceSetParams(LogicalUnit):
 
     # osparams processing
     if self.op.osparams:
-      i_osdict = _GetUpdatedParams(instance.osparams, self.op.osparams)
-      _CheckOSParams(self, True, nodelist, instance_os, i_osdict)
+      i_osdict = GetUpdatedParams(instance.osparams, self.op.osparams)
+      CheckOSParams(self, True, nodelist, instance_os, i_osdict)
       self.os_inst = i_osdict # the new dict (without defaults)
     else:
       self.os_inst = {}
@@ -3971,9 +2881,9 @@ class LUInstanceSetParams(LogicalUnit):
 
       delta = self.op.runtime_mem - current_memory
       if delta > 0:
-        _CheckNodeFreeMemory(self, instance.primary_node,
-                             "ballooning memory for instance %s" %
-                             instance.name, delta, instance.hypervisor)
+        CheckNodeFreeMemory(self, instance.primary_node,
+                            "ballooning memory for instance %s" %
+                            instance.name, delta, instance.hypervisor)
 
     if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
       raise errors.OpPrereqError("Disk operations not supported for"
@@ -3997,8 +2907,8 @@ class LUInstanceSetParams(LogicalUnit):
 
     # Verify NIC changes (operating on copy)
     nics = instance.nics[:]
-    ApplyContainerMods("NIC", nics, None, self.nicmod,
-                       _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
+    _ApplyContainerMods("NIC", nics, None, self.nicmod,
+                        _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
     if len(nics) > constants.MAX_NICS:
       raise errors.OpPrereqError("Instance has too many network interfaces"
                                  " (%d), cannot add more" % constants.MAX_NICS,
@@ -4009,8 +2919,8 @@ class LUInstanceSetParams(LogicalUnit):
 
     # Verify disk changes (operating on a copy)
     disks = copy.deepcopy(instance.disks)
-    ApplyContainerMods("disk", disks, None, self.diskmod, None, _PrepareDiskMod,
-                       None)
+    _ApplyContainerMods("disk", disks, None, self.diskmod, None,
+                        _PrepareDiskMod, None)
     utils.ValidateDeviceNames("disk", disks)
     if len(disks) > constants.MAX_DISKS:
       raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
@@ -4023,16 +2933,16 @@ class LUInstanceSetParams(LogicalUnit):
     ispec[constants.ISPEC_DISK_SIZE] = disk_sizes
 
     if self.op.offline is not None and self.op.offline:
-      _CheckInstanceState(self, instance, CAN_CHANGE_INSTANCE_OFFLINE,
-                          msg="can't change to offline")
+      CheckInstanceState(self, instance, CAN_CHANGE_INSTANCE_OFFLINE,
+                         msg="can't change to offline")
 
     # Pre-compute NIC changes (necessary to use result in hooks)
     self._nic_chgdesc = []
     if self.nicmod:
       # Operate on copies as this is still in prereq
       nics = [nic.Copy() for nic in instance.nics]
-      ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
-                         self._CreateNewNic, self._ApplyNicMods, None)
+      _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
+                          self._CreateNewNic, self._ApplyNicMods, None)
       # Verify that NIC names are unique and valid
       utils.ValidateDeviceNames("NIC", nics)
       self._new_nics = nics
@@ -4091,24 +3001,24 @@ class LUInstanceSetParams(LogicalUnit):
                   constants.IDISK_VG: d.logical_id[0],
                   constants.IDISK_NAME: d.name}
                  for d in instance.disks]
-    new_disks = _GenerateDiskTemplate(self, self.op.disk_template,
-                                      instance.name, pnode, [snode],
-                                      disk_info, None, None, 0, feedback_fn,
-                                      self.diskparams)
+    new_disks = GenerateDiskTemplate(self, self.op.disk_template,
+                                     instance.name, pnode, [snode],
+                                     disk_info, None, None, 0, feedback_fn,
+                                     self.diskparams)
     anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
                                         self.diskparams)
-    p_excl_stor = _IsExclusiveStorageEnabledNodeName(self.cfg, pnode)
-    s_excl_stor = _IsExclusiveStorageEnabledNodeName(self.cfg, snode)
-    info = _GetInstanceInfoText(instance)
+    p_excl_stor = IsExclusiveStorageEnabledNodeName(self.cfg, pnode)
+    s_excl_stor = IsExclusiveStorageEnabledNodeName(self.cfg, snode)
+    info = GetInstanceInfoText(instance)
     feedback_fn("Creating additional volumes...")
     # first, create the missing data and meta devices
     for disk in anno_disks:
       # unfortunately this is... not too nice
-      _CreateSingleBlockDev(self, pnode, instance, disk.children[1],
-                            info, True, p_excl_stor)
+      CreateSingleBlockDev(self, pnode, instance, disk.children[1],
+                           info, True, p_excl_stor)
       for child in disk.children:
-        _CreateSingleBlockDev(self, snode, instance, child, info, True,
-                              s_excl_stor)
+        CreateSingleBlockDev(self, snode, instance, child, info, True,
+                             s_excl_stor)
     # at this stage, all new LVs have been created, we can rename the
     # old ones
     feedback_fn("Renaming original volumes...")
@@ -4123,8 +3033,8 @@ class LUInstanceSetParams(LogicalUnit):
       for disk in anno_disks:
         for (node, excl_stor) in [(pnode, p_excl_stor), (snode, s_excl_stor)]:
           f_create = node == pnode
-          _CreateSingleBlockDev(self, node, instance, disk, info, f_create,
-                                excl_stor)
+          CreateSingleBlockDev(self, node, instance, disk, info, f_create,
+                               excl_stor)
     except errors.GenericError, e:
       feedback_fn("Initializing of DRBD devices failed;"
                   " renaming back original volumes...")
@@ -4142,11 +3052,11 @@ class LUInstanceSetParams(LogicalUnit):
     self.cfg.Update(instance, feedback_fn)
 
     # Release node locks while waiting for sync
-    _ReleaseLocks(self, locking.LEVEL_NODE)
+    ReleaseLocks(self, locking.LEVEL_NODE)
 
     # disks are created, waiting for sync
-    disk_abort = not _WaitForSync(self, instance,
-                                  oneshot=not self.op.wait_for_sync)
+    disk_abort = not WaitForSync(self, instance,
+                                 oneshot=not self.op.wait_for_sync)
     if disk_abort:
       raise errors.OpExecError("There are some degraded disks for"
                                " this instance, please cleanup manually")
@@ -4166,7 +3076,7 @@ class LUInstanceSetParams(LogicalUnit):
     snode = instance.secondary_nodes[0]
     feedback_fn("Converting template to plain")
 
-    old_disks = _AnnotateDiskParams(instance, instance.disks, self.cfg)
+    old_disks = AnnotateDiskParams(instance, instance.disks, self.cfg)
     new_disks = [d.children[0] for d in instance.disks]
 
     # copy over size, mode and name
@@ -4188,7 +3098,7 @@ class LUInstanceSetParams(LogicalUnit):
     self.cfg.Update(instance, feedback_fn)
 
     # Release locks in case removing disks takes a while
-    _ReleaseLocks(self, locking.LEVEL_NODE)
+    ReleaseLocks(self, locking.LEVEL_NODE)
 
     feedback_fn("Removing volumes on the secondary node...")
     for disk in old_disks:
@@ -4221,29 +3131,18 @@ class LUInstanceSetParams(LogicalUnit):
       file_driver = file_path = None
 
     disk = \
-      _GenerateDiskTemplate(self, instance.disk_template, instance.name,
-                            instance.primary_node, instance.secondary_nodes,
-                            [params], file_path, file_driver, idx,
-                            self.Log, self.diskparams)[0]
-
-    info = _GetInstanceInfoText(instance)
-
-    logging.info("Creating volume %s for instance %s",
-                 disk.iv_name, instance.name)
-    # Note: this needs to be kept in sync with _CreateDisks
-    #HARDCODE
-    for node in instance.all_nodes:
-      f_create = (node == instance.primary_node)
-      try:
-        _CreateBlockDev(self, node, instance, disk, f_create, info, f_create)
-      except errors.OpExecError, err:
-        self.LogWarning("Failed to create volume %s (%s) on node '%s': %s",
-                        disk.iv_name, disk, node, err)
+      GenerateDiskTemplate(self, instance.disk_template, instance.name,
+                           instance.primary_node, instance.secondary_nodes,
+                           [params], file_path, file_driver, idx,
+                           self.Log, self.diskparams)[0]
+
+    new_disks = CreateDisks(self, instance, disks=[disk])
 
     if self.cluster.prealloc_wipe_disks:
       # Wipe new disk
-      _WipeDisks(self, instance,
-                 disks=[(idx, disk, 0)])
+      WipeOrCleanupDisks(self, instance,
+                         disks=[(idx, disk, 0)],
+                         cleanup=new_disks)
 
     return (disk, [
       ("disk/%d" % idx, "add:size=%s,mode=%s" % (disk.size, disk.mode)),
@@ -4255,14 +3154,13 @@ class LUInstanceSetParams(LogicalUnit):
 
     """
     changes = []
-    mode = params.get(constants.IDISK_MODE, None)
-    if mode:
-      disk.mode = mode
+    if constants.IDISK_MODE in params:
+      disk.mode = params.get(constants.IDISK_MODE)
       changes.append(("disk.mode/%d" % idx, disk.mode))
 
-    name = params.get(constants.IDISK_NAME, None)
-    disk.name = name
-    changes.append(("disk.name/%d" % idx, disk.name))
+    if constants.IDISK_NAME in params:
+      disk.name = params.get(constants.IDISK_NAME)
+      changes.append(("disk.name/%d" % idx, disk.name))
 
     return changes
 
@@ -4270,7 +3168,7 @@ class LUInstanceSetParams(LogicalUnit):
     """Removes a disk.
 
     """
-    (anno_disk,) = _AnnotateDiskParams(self.instance, [root], self.cfg)
+    (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
     for node, disk in anno_disk.ComputeNodeTree(self.instance.primary_node):
       self.cfg.SetDiskID(disk, node)
       msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
@@ -4362,8 +3260,9 @@ class LUInstanceSetParams(LogicalUnit):
       result.append(("runtime_memory", self.op.runtime_mem))
 
     # Apply disk changes
-    ApplyContainerMods("disk", instance.disks, result, self.diskmod,
-                       self._CreateNewDisk, self._ModifyDisk, self._RemoveDisk)
+    _ApplyContainerMods("disk", instance.disks, result, self.diskmod,
+                        self._CreateNewDisk, self._ModifyDisk,
+                        self._RemoveDisk)
     _UpdateIvNames(0, instance.disks)
 
     if self.op.disk_template:
@@ -4377,7 +3276,7 @@ class LUInstanceSetParams(LogicalUnit):
             ("Not owning the correct locks, owning %r, expected at least %r" %
              (owned, check_nodes))
 
-      r_shut = _ShutdownInstanceDisks(self, instance)
+      r_shut = ShutdownInstanceDisks(self, instance)
       if not r_shut:
         raise errors.OpExecError("Cannot shutdown instance disks, unable to"
                                  " proceed with disk template conversion")
@@ -4395,8 +3294,8 @@ class LUInstanceSetParams(LogicalUnit):
 
     # Release node and resource locks if there are any (they might already have
     # been released during disk conversion)
-    _ReleaseLocks(self, locking.LEVEL_NODE)
-    _ReleaseLocks(self, locking.LEVEL_NODE_RES)
+    ReleaseLocks(self, locking.LEVEL_NODE)
+    ReleaseLocks(self, locking.LEVEL_NODE_RES)
 
     # Apply NIC changes
     if self._new_nics is not None:
@@ -4457,7 +3356,7 @@ class LUInstanceChangeGroup(LogicalUnit):
   REQ_BGL = False
 
   def ExpandNames(self):
-    self.share_locks = _ShareAll()
+    self.share_locks = ShareAll()
 
     self.needed_locks = {
       locking.LEVEL_NODEGROUP: [],
@@ -4473,7 +3372,7 @@ class LUInstanceChangeGroup(LogicalUnit):
     else:
       self.req_target_uuids = None
 
-    self.op.iallocator = _GetDefaultIAllocator(self.cfg, self.op.iallocator)
+    self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)
 
   def DeclareLocks(self, level):
     if level == locking.LEVEL_NODEGROUP:
@@ -4526,8 +3425,8 @@ class LUInstanceChangeGroup(LogicalUnit):
       ("Instance %s's nodes changed while we kept the lock" %
        self.op.instance_name)
 
-    inst_groups = _CheckInstanceNodeGroups(self.cfg, self.op.instance_name,
-                                           owned_groups)
+    inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_name,
+                                          owned_groups)
 
     if self.req_target_uuids:
       # User requested specific target groups
@@ -4558,7 +3457,7 @@ class LUInstanceChangeGroup(LogicalUnit):
       "TARGET_GROUPS": " ".join(self.target_uuids),
       }
 
-    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
+    env.update(BuildInstanceHookEnvByObject(self, self.instance))
 
     return env
 
@@ -4586,713 +3485,9 @@ class LUInstanceChangeGroup(LogicalUnit):
                                  (self.op.instance_name, self.op.iallocator,
                                   ial.info), errors.ECODE_NORES)
 
-    jobs = _LoadNodeEvacResult(self, ial.result, self.op.early_release, False)
+    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)
 
     self.LogInfo("Iallocator returned %s job(s) for changing group of"
                  " instance '%s'", len(jobs), self.op.instance_name)
 
     return ResultWithJobs(jobs)
-
-
-class TLMigrateInstance(Tasklet):
-  """Tasklet class for instance migration.
-
-  @type live: boolean
-  @ivar live: whether the migration will be done live or non-live;
-      this variable is initialized only after CheckPrereq has run
-  @type cleanup: boolean
-  @ivar cleanup: Whether we clean up after a failed migration
-  @type iallocator: string
-  @ivar iallocator: The iallocator used to determine target_node
-  @type target_node: string
-  @ivar target_node: If given, the target_node to reallocate the instance to
-  @type failover: boolean
-  @ivar failover: Whether operation results in failover or migration
-  @type fallback: boolean
-  @ivar fallback: Whether fallback to failover is allowed if migration is not
-                  possible
-  @type ignore_consistency: boolean
-  @ivar ignore_consistency: Whether we should ignore consistency between source
-                            and target node
-  @type shutdown_timeout: int
-  @ivar shutdown_timeout: In case of failover timeout of the shutdown
-  @type ignore_ipolicy: bool
-  @ivar ignore_ipolicy: If true, we can ignore instance policy when migrating
-
-  """
-
-  # Constants
-  _MIGRATION_POLL_INTERVAL = 1      # seconds
-  _MIGRATION_FEEDBACK_INTERVAL = 10 # seconds
-
-  def __init__(self, lu, instance_name, cleanup, failover, fallback,
-               ignore_consistency, allow_runtime_changes, shutdown_timeout,
-               ignore_ipolicy):
-    """Initializes this class.
-
-    """
-    Tasklet.__init__(self, lu)
-
-    # Parameters
-    self.instance_name = instance_name
-    self.cleanup = cleanup
-    self.live = False # will be overridden later
-    self.failover = failover
-    self.fallback = fallback
-    self.ignore_consistency = ignore_consistency
-    self.shutdown_timeout = shutdown_timeout
-    self.ignore_ipolicy = ignore_ipolicy
-    self.allow_runtime_changes = allow_runtime_changes
-
-  def CheckPrereq(self):
-    """Check prerequisites.
-
-    This checks that the instance is in the cluster.
-
-    """
-    instance_name = _ExpandInstanceName(self.lu.cfg, self.instance_name)
-    instance = self.cfg.GetInstanceInfo(instance_name)
-    assert instance is not None
-    self.instance = instance
-    cluster = self.cfg.GetClusterInfo()
-
-    if (not self.cleanup and
-        not instance.admin_state == constants.ADMINST_UP and
-        not self.failover and self.fallback):
-      self.lu.LogInfo("Instance is marked down or offline, fallback allowed,"
-                      " switching to failover")
-      self.failover = True
-
-    if instance.disk_template not in constants.DTS_MIRRORED:
-      if self.failover:
-        text = "failovers"
-      else:
-        text = "migrations"
-      raise errors.OpPrereqError("Instance's disk layout '%s' does not allow"
-                                 " %s" % (instance.disk_template, text),
-                                 errors.ECODE_STATE)
-
-    if instance.disk_template in constants.DTS_EXT_MIRROR:
-      _CheckIAllocatorOrNode(self.lu, "iallocator", "target_node")
-
-      if self.lu.op.iallocator:
-        assert locking.NAL in self.lu.owned_locks(locking.LEVEL_NODE_ALLOC)
-        self._RunAllocator()
-      else:
-        # We set self.target_node as it is required by
-        # BuildHooksEnv
-        self.target_node = self.lu.op.target_node
-
-      # Check that the target node is correct in terms of instance policy
-      nodeinfo = self.cfg.GetNodeInfo(self.target_node)
-      group_info = self.cfg.GetNodeGroup(nodeinfo.group)
-      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
-                                                              group_info)
-      _CheckTargetNodeIPolicy(self.lu, ipolicy, instance, nodeinfo, self.cfg,
-                              ignore=self.ignore_ipolicy)
-
-      # self.target_node is already populated, either directly or by the
-      # iallocator run
-      target_node = self.target_node
-      if self.target_node == instance.primary_node:
-        raise errors.OpPrereqError("Cannot migrate instance %s"
-                                   " to its primary (%s)" %
-                                   (instance.name, instance.primary_node),
-                                   errors.ECODE_STATE)
-
-      if len(self.lu.tasklets) == 1:
-        # It is safe to release locks only when we're the only tasklet
-        # in the LU
-        _ReleaseLocks(self.lu, locking.LEVEL_NODE,
-                      keep=[instance.primary_node, self.target_node])
-        _ReleaseLocks(self.lu, locking.LEVEL_NODE_ALLOC)
-
-    else:
-      assert not self.lu.glm.is_owned(locking.LEVEL_NODE_ALLOC)
-
-      secondary_nodes = instance.secondary_nodes
-      if not secondary_nodes:
-        raise errors.ConfigurationError("No secondary node but using"
-                                        " %s disk template" %
-                                        instance.disk_template)
-      target_node = secondary_nodes[0]
-      if self.lu.op.iallocator or (self.lu.op.target_node and
-                                   self.lu.op.target_node != target_node):
-        if self.failover:
-          text = "failed over"
-        else:
-          text = "migrated"
-        raise errors.OpPrereqError("Instances with disk template %s cannot"
-                                   " be %s to arbitrary nodes"
-                                   " (neither an iallocator nor a target"
-                                   " node can be passed)" %
-                                   (instance.disk_template, text),
-                                   errors.ECODE_INVAL)
-      nodeinfo = self.cfg.GetNodeInfo(target_node)
-      group_info = self.cfg.GetNodeGroup(nodeinfo.group)
-      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
-                                                              group_info)
-      _CheckTargetNodeIPolicy(self.lu, ipolicy, instance, nodeinfo, self.cfg,
-                              ignore=self.ignore_ipolicy)
-
-    i_be = cluster.FillBE(instance)
-
-    # check memory requirements on the secondary node
-    if (not self.cleanup and
-         (not self.failover or instance.admin_state == constants.ADMINST_UP)):
-      self.tgt_free_mem = _CheckNodeFreeMemory(self.lu, target_node,
-                                               "migrating instance %s" %
-                                               instance.name,
-                                               i_be[constants.BE_MINMEM],
-                                               instance.hypervisor)
-    else:
-      self.lu.LogInfo("Not checking memory on the secondary node as"
-                      " instance will not be started")
-
-    # check if failover must be forced instead of migration
-    if (not self.cleanup and not self.failover and
-        i_be[constants.BE_ALWAYS_FAILOVER]):
-      self.lu.LogInfo("Instance configured to always failover; fallback"
-                      " to failover")
-      self.failover = True
-
-    # check bridge existence
-    _CheckInstanceBridgesExist(self.lu, instance, node=target_node)
-
-    if not self.cleanup:
-      _CheckNodeNotDrained(self.lu, target_node)
-      if not self.failover:
-        result = self.rpc.call_instance_migratable(instance.primary_node,
-                                                   instance)
-        if result.fail_msg and self.fallback:
-          self.lu.LogInfo("Can't migrate, instance offline, fallback to"
-                          " failover")
-          self.failover = True
-        else:
-          result.Raise("Can't migrate, please use failover",
-                       prereq=True, ecode=errors.ECODE_STATE)
-
-    assert not (self.failover and self.cleanup)
-
-    if not self.failover:
-      if self.lu.op.live is not None and self.lu.op.mode is not None:
-        raise errors.OpPrereqError("Only one of the 'live' and 'mode'"
-                                   " parameters are accepted",
-                                   errors.ECODE_INVAL)
-      if self.lu.op.live is not None:
-        if self.lu.op.live:
-          self.lu.op.mode = constants.HT_MIGRATION_LIVE
-        else:
-          self.lu.op.mode = constants.HT_MIGRATION_NONLIVE
-        # reset the 'live' parameter to None so that repeated
-        # invocations of CheckPrereq do not raise an exception
-        self.lu.op.live = None
-      elif self.lu.op.mode is None:
-        # read the default value from the hypervisor
-        i_hv = cluster.FillHV(self.instance, skip_globals=False)
-        self.lu.op.mode = i_hv[constants.HV_MIGRATION_MODE]
-
-      self.live = self.lu.op.mode == constants.HT_MIGRATION_LIVE
-    else:
-      # Failover is never live
-      self.live = False
-
-    if not (self.failover or self.cleanup):
-      remote_info = self.rpc.call_instance_info(instance.primary_node,
-                                                instance.name,
-                                                instance.hypervisor)
-      remote_info.Raise("Error checking instance on node %s" %
-                        instance.primary_node)
-      instance_running = bool(remote_info.payload)
-      if instance_running:
-        self.current_mem = int(remote_info.payload["memory"])
-
-  def _RunAllocator(self):
-    """Run the allocator based on input opcode.
-
-    """
-    assert locking.NAL in self.lu.owned_locks(locking.LEVEL_NODE_ALLOC)
-
-    # FIXME: add a self.ignore_ipolicy option
-    req = iallocator.IAReqRelocate(name=self.instance_name,
-                                   relocate_from=[self.instance.primary_node])
-    ial = iallocator.IAllocator(self.cfg, self.rpc, req)
-
-    ial.Run(self.lu.op.iallocator)
-
-    if not ial.success:
-      raise errors.OpPrereqError("Can't compute nodes using"
-                                 " iallocator '%s': %s" %
-                                 (self.lu.op.iallocator, ial.info),
-                                 errors.ECODE_NORES)
-    self.target_node = ial.result[0]
-    self.lu.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
-                    self.instance_name, self.lu.op.iallocator,
-                    utils.CommaJoin(ial.result))
-
-  def _WaitUntilSync(self):
-    """Poll with custom rpc for disk sync.
-
-    This uses our own step-based rpc call.
-
-    """
-    self.feedback_fn("* wait until resync is done")
-    all_done = False
-    while not all_done:
-      all_done = True
-      result = self.rpc.call_drbd_wait_sync(self.all_nodes,
-                                            self.nodes_ip,
-                                            (self.instance.disks,
-                                             self.instance))
-      min_percent = 100
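-      # track the least synced node so the progress feedback is conservative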
-      for node, nres in result.items():
-        nres.Raise("Cannot resync disks on node %s" % node)
-        node_done, node_percent = nres.payload
-        all_done = all_done and node_done
-        if node_percent is not None:
-          min_percent = min(min_percent, node_percent)
-      if not all_done:
-        if min_percent < 100:
-          self.feedback_fn("   - progress: %.1f%%" % min_percent)
-        time.sleep(2)
-
-  def _EnsureSecondary(self, node):
-    """Demote a node to secondary.
-
-    """
-    self.feedback_fn("* switching node %s to secondary mode" % node)
-
-    for dev in self.instance.disks:
-      self.cfg.SetDiskID(dev, node)
-
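-    # closing the block devices demotes the disks to the secondary role
-    # on that node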
-    result = self.rpc.call_blockdev_close(node, self.instance.name,
-                                          self.instance.disks)
-    result.Raise("Cannot change disk to secondary on node %s" % node)
-
-  def _GoStandalone(self):
-    """Disconnect from the network.
-
-    """
-    self.feedback_fn("* changing into standalone mode")
-    result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
-                                               self.instance.disks)
-    for node, nres in result.items():
-      nres.Raise("Cannot disconnect disks node %s" % node)
-
-  def _GoReconnect(self, multimaster):
-    """Reconnect to the network.
-
-    """
-    if multimaster:
-      msg = "dual-master"
-    else:
-      msg = "single-master"
-    self.feedback_fn("* changing disks into %s mode" % msg)
-    result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
-                                           (self.instance.disks, self.instance),
-                                           self.instance.name, multimaster)
-    for node, nres in result.items():
-      nres.Raise("Cannot change disks config on node %s" % node)
-
-  def _ExecCleanup(self):
-    """Try to cleanup after a failed migration.
-
-    The cleanup is done by:
-      - check that the instance is running only on one node
-        (and update the config if needed)
-      - change disks on its secondary node to secondary
-      - wait until disks are fully synchronized
-      - disconnect from the network
-      - change disks into single-master mode
-      - wait again until disks are fully synchronized
-
-    """
-    instance = self.instance
-    target_node = self.target_node
-    source_node = self.source_node
-
-    # check running on only one node
-    self.feedback_fn("* checking where the instance actually runs"
-                     " (if this hangs, the hypervisor might be in"
-                     " a bad state)")
-    ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
-    for node, result in ins_l.items():
-      result.Raise("Can't contact node %s" % node)
-
-    runningon_source = instance.name in ins_l[source_node].payload
-    runningon_target = instance.name in ins_l[target_node].payload
-
-    if runningon_source and runningon_target:
-      raise errors.OpExecError("Instance seems to be running on two nodes,"
-                               " or the hypervisor is confused; you will have"
-                               " to ensure manually that it runs only on one"
-                               " and restart this operation")
-
-    if not (runningon_source or runningon_target):
-      raise errors.OpExecError("Instance does not seem to be running at all;"
-                               " in this case it's safer to repair by"
-                               " running 'gnt-instance stop' to ensure disk"
-                               " shutdown, and then restarting it")
-
-    if runningon_target:
-      # the migration has actually succeeded, we need to update the config
-      self.feedback_fn("* instance running on secondary node (%s),"
-                       " updating config" % target_node)
-      instance.primary_node = target_node
-      self.cfg.Update(instance, self.feedback_fn)
-      demoted_node = source_node
-    else:
-      self.feedback_fn("* instance confirmed to be running on its"
-                       " primary node (%s)" % source_node)
-      demoted_node = target_node
-
-    if instance.disk_template in constants.DTS_INT_MIRROR:
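-      # for internally mirrored (DRBD) templates, demote the node that is no
-      # longer the primary and re-establish a clean single-master connection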
-      self._EnsureSecondary(demoted_node)
-      try:
-        self._WaitUntilSync()
-      except errors.OpExecError:
-        # we ignore errors here, since if the device is standalone, it
-        # won't be able to sync
-        pass
-      self._GoStandalone()
-      self._GoReconnect(False)
-      self._WaitUntilSync()
-
-    self.feedback_fn("* done")
-
-  def _RevertDiskStatus(self):
-    """Try to revert the disk status after a failed migration.
-
-    """
-    target_node = self.target_node
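-    # externally mirrored disk templates have no DRBD roles to revert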
-    if self.instance.disk_template in constants.DTS_EXT_MIRROR:
-      return
-
-    try:
-      self._EnsureSecondary(target_node)
-      self._GoStandalone()
-      self._GoReconnect(False)
-      self._WaitUntilSync()
-    except errors.OpExecError as err:
-      self.lu.LogWarning("Migration failed and I can't reconnect the drives,"
-                         " please try to recover the instance manually;"
-                         " error '%s'" % str(err))
-
-  def _AbortMigration(self):
-    """Call the hypervisor code to abort a started migration.
-
-    """
-    instance = self.instance
-    target_node = self.target_node
-    source_node = self.source_node
-    migration_info = self.migration_info
-
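-    # finalizing with success=False tells both nodes to abandon the
-    # half-finished migration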
-    abort_result = self.rpc.call_instance_finalize_migration_dst(target_node,
-                                                                 instance,
-                                                                 migration_info,
-                                                                 False)
-    abort_msg = abort_result.fail_msg
-    if abort_msg:
-      logging.error("Aborting migration failed on target node %s: %s",
-                    target_node, abort_msg)
-      # Don't raise an exception here, as we still have to try to revert the
-      # disk status, even if this step failed.
-
-    abort_result = self.rpc.call_instance_finalize_migration_src(
-      source_node, instance, False, self.live)
-    abort_msg = abort_result.fail_msg
-    if abort_msg:
-      logging.error("Aborting migration failed on source node %s: %s",
-                    source_node, abort_msg)
-
-  def _ExecMigration(self):
-    """Migrate an instance.
-
-    The migrate is done by:
-      - change the disks into dual-master mode
-      - wait until disks are fully synchronized again
-      - migrate the instance
-      - change disks on the new secondary node (the old primary) to secondary
-      - wait until disks are fully synchronized
-      - change disks into single-master mode
-
-    """
-    instance = self.instance
-    target_node = self.target_node
-    source_node = self.source_node
-
-    # Check for hypervisor version mismatch and warn the user.
-    nodeinfo = self.rpc.call_node_info([source_node, target_node],
-                                       None, [self.instance.hypervisor], False)
-    for ninfo in nodeinfo.values():
-      ninfo.Raise("Unable to retrieve node information from node '%s'" %
-                  ninfo.node)
-    (_, _, (src_info, )) = nodeinfo[source_node].payload
-    (_, _, (dst_info, )) = nodeinfo[target_node].payload
-
-    if ((constants.HV_NODEINFO_KEY_VERSION in src_info) and
-        (constants.HV_NODEINFO_KEY_VERSION in dst_info)):
-      src_version = src_info[constants.HV_NODEINFO_KEY_VERSION]
-      dst_version = dst_info[constants.HV_NODEINFO_KEY_VERSION]
-      if src_version != dst_version:
-        self.feedback_fn("* warning: hypervisor version mismatch between"
-                         " source (%s) and target (%s) node" %
-                         (src_version, dst_version))
-
-    self.feedback_fn("* checking disk consistency between source and target")
-    for (idx, dev) in enumerate(instance.disks):
-      if not _CheckDiskConsistency(self.lu, instance, dev, target_node, False):
-        raise errors.OpExecError("Disk %s is degraded or not fully"
-                                 " synchronized on target node,"
-                                 " aborting migration" % idx)
-
-    if self.current_mem > self.tgt_free_mem:
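-      # the target node cannot hold the instance at its current memory size;
-      # balloon it down if runtime changes are allowed, otherwise abort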
-      if not self.allow_runtime_changes:
-        raise errors.OpExecError("Memory ballooning not allowed and not enough"
-                                 " free memory to fit instance %s on target"
-                                 " node %s (have %dMB, need %dMB)" %
-                                 (instance.name, target_node,
-                                  self.tgt_free_mem, self.current_mem))
-      self.feedback_fn("* setting instance memory to %s" % self.tgt_free_mem)
-      rpcres = self.rpc.call_instance_balloon_memory(instance.primary_node,
-                                                     instance,
-                                                     self.tgt_free_mem)
-      rpcres.Raise("Cannot modify instance runtime memory")
-
-    # First get the migration information from the remote node
-    result = self.rpc.call_migration_info(source_node, instance)
-    msg = result.fail_msg
-    if msg:
-      log_err = ("Failed fetching source migration information from %s: %s" %
-                 (source_node, msg))
-      logging.error(log_err)
-      raise errors.OpExecError(log_err)
-
-    self.migration_info = migration_info = result.payload
-
-    if self.instance.disk_template not in constants.DTS_EXT_MIRROR:
-      # Then switch the disks to master/master mode
-      self._EnsureSecondary(target_node)
-      self._GoStandalone()
-      self._GoReconnect(True)
-      self._WaitUntilSync()
-
-    self.feedback_fn("* preparing %s to accept the instance" % target_node)
-    result = self.rpc.call_accept_instance(target_node,
-                                           instance,
-                                           migration_info,
-                                           self.nodes_ip[target_node])
-
-    msg = result.fail_msg
-    if msg:
-      logging.error("Instance pre-migration failed, trying to revert"
-                    " disk status: %s", msg)
-      self.feedback_fn("Pre-migration failed, aborting")
-      self._AbortMigration()
-      self._RevertDiskStatus()
-      raise errors.OpExecError("Could not pre-migrate instance %s: %s" %
-                               (instance.name, msg))
-
-    self.feedback_fn("* migrating instance to %s" % target_node)
-    result = self.rpc.call_instance_migrate(source_node, instance,
-                                            self.nodes_ip[target_node],
-                                            self.live)
-    msg = result.fail_msg
-    if msg:
-      logging.error("Instance migration failed, trying to revert"
-                    " disk status: %s", msg)
-      self.feedback_fn("Migration failed, aborting")
-      self._AbortMigration()
-      self._RevertDiskStatus()
-      raise errors.OpExecError("Could not migrate instance %s: %s" %
-                               (instance.name, msg))
-
-    self.feedback_fn("* starting memory transfer")
-    last_feedback = time.time()
-    while True:
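-      # poll the source node for migration status until the transfer either
-      # completes or fails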
-      result = self.rpc.call_instance_get_migration_status(source_node,
-                                                           instance)
-      msg = result.fail_msg
-      ms = result.payload   # MigrationStatus instance
-      if msg or (ms.status in constants.HV_MIGRATION_FAILED_STATUSES):
-        logging.error("Instance migration failed, trying to revert"
-                      " disk status: %s", msg)
-        self.feedback_fn("Migration failed, aborting")
-        self._AbortMigration()
-        self._RevertDiskStatus()
-        if not msg:
-          msg = "hypervisor returned failure"
-        raise errors.OpExecError("Could not migrate instance %s: %s" %
-                                 (instance.name, msg))
-
-      if result.payload.status != constants.HV_MIGRATION_ACTIVE:
-        self.feedback_fn("* memory transfer complete")
-        break
-
-      if (utils.TimeoutExpired(last_feedback,
-                               self._MIGRATION_FEEDBACK_INTERVAL) and
-          ms.transferred_ram is not None):
-        mem_progress = 100 * float(ms.transferred_ram) / float(ms.total_ram)
-        self.feedback_fn("* memory transfer progress: %.2f %%" % mem_progress)
-        last_feedback = time.time()
-
-      time.sleep(self._MIGRATION_POLL_INTERVAL)
-
-    result = self.rpc.call_instance_finalize_migration_src(source_node,
-                                                           instance,
-                                                           True,
-                                                           self.live)
-    msg = result.fail_msg
-    if msg:
-      logging.error("Instance migration succeeded, but finalization failed"
-                    " on the source node: %s", msg)
-      raise errors.OpExecError("Could not finalize instance migration: %s" %
-                               msg)
-
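-    # the instance now runs on the target node; record the new primary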
-    instance.primary_node = target_node
-
-    # distribute new instance config to the other nodes
-    self.cfg.Update(instance, self.feedback_fn)
-
-    result = self.rpc.call_instance_finalize_migration_dst(target_node,
-                                                           instance,
-                                                           migration_info,
-                                                           True)
-    msg = result.fail_msg
-    if msg:
-      logging.error("Instance migration succeeded, but finalization failed"
-                    " on the target node: %s", msg)
-      raise errors.OpExecError("Could not finalize instance migration: %s" %
-                               msg)
-
-    if self.instance.disk_template not in constants.DTS_EXT_MIRROR:
-      self._EnsureSecondary(source_node)
-      self._WaitUntilSync()
-      self._GoStandalone()
-      self._GoReconnect(False)
-      self._WaitUntilSync()
-
-    # If the instance's disk template is `rbd' or `ext' and there was a
-    # successful migration, unmap the device from the source node.
-    if self.instance.disk_template in (constants.DT_RBD, constants.DT_EXT):
-      disks = _ExpandCheckDisks(instance, instance.disks)
-      self.feedback_fn("* unmapping instance's disks from %s" % source_node)
-      for disk in disks:
-        result = self.rpc.call_blockdev_shutdown(source_node, (disk, instance))
-        msg = result.fail_msg
-        if msg:
-          logging.error("Migration was successful, but couldn't unmap the"
-                        " block device %s on source node %s: %s",
-                        disk.iv_name, source_node, msg)
-          logging.error("You need to unmap the device %s manually on %s",
-                        disk.iv_name, source_node)
-
-    self.feedback_fn("* done")
-
-  def _ExecFailover(self):
-    """Failover an instance.
-
-    The failover is done by shutting it down on its present node and
-    starting it on the secondary.
-
-    """
-    instance = self.instance
-    primary_node = self.cfg.GetNodeInfo(instance.primary_node)
-
-    source_node = instance.primary_node
-    target_node = self.target_node
-
-    if instance.admin_state == constants.ADMINST_UP:
-      self.feedback_fn("* checking disk consistency between source and target")
-      for (idx, dev) in enumerate(instance.disks):
-        # for drbd, these are drbd over lvm
-        if not _CheckDiskConsistency(self.lu, instance, dev, target_node,
-                                     False):
-          if primary_node.offline:
-            self.feedback_fn("Node %s is offline, ignoring degraded disk %s on"
-                             " target node %s" %
-                             (primary_node.name, idx, target_node))
-          elif not self.ignore_consistency:
-            raise errors.OpExecError("Disk %s is degraded on target node,"
-                                     " aborting failover" % idx)
-    else:
-      self.feedback_fn("* not checking disk consistency as instance is not"
-                       " running")
-
-    self.feedback_fn("* shutting down instance on source node")
-    logging.info("Shutting down instance %s on node %s",
-                 instance.name, source_node)
-
-    result = self.rpc.call_instance_shutdown(source_node, instance,
-                                             self.shutdown_timeout,
-                                             self.lu.op.reason)
-    msg = result.fail_msg
-    if msg:
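-      # shutdown failed: only continue if the user chose to ignore consistency
-      # or the primary node is already known to be offline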
-      if self.ignore_consistency or primary_node.offline:
-        self.lu.LogWarning("Could not shutdown instance %s on node %s,"
-                           " proceeding anyway; please make sure node"
-                           " %s is down; error details: %s",
-                           instance.name, source_node, source_node, msg)
-      else:
-        raise errors.OpExecError("Could not shutdown instance %s on"
-                                 " node %s: %s" %
-                                 (instance.name, source_node, msg))
-
-    self.feedback_fn("* deactivating the instance's disks on source node")
-    if not _ShutdownInstanceDisks(self.lu, instance, ignore_primary=True):
-      raise errors.OpExecError("Can't shut down the instance's disks")
-
-    instance.primary_node = target_node
-    # distribute new instance config to the other nodes
-    self.cfg.Update(instance, self.feedback_fn)
-
-    # Only start the instance if it's marked as up
-    if instance.admin_state == constants.ADMINST_UP:
-      self.feedback_fn("* activating the instance's disks on target node %s" %
-                       target_node)
-      logging.info("Starting instance %s on node %s",
-                   instance.name, target_node)
-
-      disks_ok, _ = _AssembleInstanceDisks(self.lu, instance,
-                                           ignore_secondaries=True)
-      if not disks_ok:
-        _ShutdownInstanceDisks(self.lu, instance)
-        raise errors.OpExecError("Can't activate the instance's disks")
-
-      self.feedback_fn("* starting the instance on the target node %s" %
-                       target_node)
-      result = self.rpc.call_instance_start(target_node, (instance, None, None),
-                                            False, self.lu.op.reason)
-      msg = result.fail_msg
-      if msg:
-        _ShutdownInstanceDisks(self.lu, instance)
-        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
-                                 (instance.name, target_node, msg))
-
-  def Exec(self, feedback_fn):
-    """Perform the migration.
-
-    """
-    self.feedback_fn = feedback_fn
-    self.source_node = self.instance.primary_node
-
-    # FIXME: if we implement migrate-to-any in DRBD, this needs fixing
-    if self.instance.disk_template in constants.DTS_INT_MIRROR:
-      self.target_node = self.instance.secondary_nodes[0]
-      # Otherwise self.target_node has been populated either
-      # directly, or through an iallocator.
-
-    self.all_nodes = [self.source_node, self.target_node]
-    self.nodes_ip = dict((name, node.secondary_ip) for (name, node)
-                         in self.cfg.GetMultiNodeInfo(self.all_nodes))
-
-    if self.failover:
-      feedback_fn("Failover instance %s" % self.instance.name)
-      self._ExecFailover()
-    else:
-      feedback_fn("Migrating instance %s" % self.instance.name)
-
-      if self.cleanup:
-        return self._ExecCleanup()
-      else:
-        return self._ExecMigration()