Merge branch 'stable-2.12' into stable-2.13
[ganeti-github.git] / lib / cmdlib / instance.py
index 9fc14d4..e6c5866 100644
@@ -65,9 +65,10 @@ from ganeti.cmdlib.common import INSTANCE_DOWN, \
 from ganeti.cmdlib.instance_storage import CreateDisks, \
   CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, ImageDisks, \
   WaitForSync, IsExclusiveStorageEnabledNodeUuid, CreateSingleBlockDev, \
-  ComputeDisks, CheckRADOSFreeSpace, ComputeDiskSizePerVG, \
+  ComputeDisks, ComputeDisksInfo, CheckRADOSFreeSpace, ComputeDiskSizePerVG, \
   GenerateDiskTemplate, StartInstanceDisks, ShutdownInstanceDisks, \
-  AssembleInstanceDisks, CheckSpindlesExclusiveStorage, TemporaryDisk
+  AssembleInstanceDisks, CheckSpindlesExclusiveStorage, TemporaryDisk, \
+  CalculateFileStorageDir
 from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
   GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
   NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
@@ -134,6 +135,7 @@ def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_name_whitelist):
   spindle_use = beparams[constants.BE_SPINDLE_USE]
   return iallocator.IAReqInstanceAlloc(name=op.instance_name,
                                        disk_template=op.disk_template,
+                                       group_name=op.group_name,
                                        tags=op.tags,
                                        os=op.os_type,
                                        vcpus=beparams[constants.BE_VCPUS],
@@ -220,8 +222,9 @@ def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
       nic_ip = ip
 
     # TODO: check the ip address for uniqueness
-    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
-      raise errors.OpPrereqError("Routed nic mode requires an ip address",
+    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip and not net:
+      raise errors.OpPrereqError("Routed nic mode requires an ip address"
+                                 " if not attached to a network",
                                  errors.ECODE_INVAL)
 
     # MAC address verification
@@ -683,6 +686,8 @@ class LUInstanceCreate(LogicalUnit):
       # When opportunistic locks are used only a temporary failure is generated
       if self.op.opportunistic_locking:
         ecode = errors.ECODE_TEMP_NORES
+        self.LogInfo("IAllocator '%s' failed on opportunistically acquired"
+                     " nodes: %s", self.op.iallocator, ial.info)
       else:
         ecode = errors.ECODE_NORES
 
@@ -728,9 +733,9 @@ class LUInstanceCreate(LogicalUnit):
       vcpus=self.be_full[constants.BE_VCPUS],
       nics=NICListToTuple(self, self.nics),
       disk_template=self.op.disk_template,
-      disks=[(d[constants.IDISK_NAME], d.get("uuid", ""),
-              d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
-             for d in self.disks],
+      # Note that self.disks here is not a list of objects.Disk
+      # instances, but of dicts as returned by ComputeDisks.
+      disks=self.disks,
       bep=self.be_full,
       hvp=self.hv_full,
       hypervisor_name=self.op.hypervisor,
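
The hook environment now consumes self.disks directly. A minimal sketch of the shape ComputeDisks is assumed to return here: plain dicts rather than objects.Disk, with literal keys standing in for the constants.IDISK_* names that the removed tuple-building code used.

  disks = [
    {"size": 1024, "mode": "rw", "name": "disk0"},  # sizes are in MiB
    {"size": 2048, "mode": "ro", "name": "disk1"},
  ]
  for d in disks:  # what the hook env builder can now iterate over directly
    print("%(name)s: %(size)d MiB, %(mode)s" % d)
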
@@ -922,46 +927,6 @@ class LUInstanceCreate(LogicalUnit):
       if name in os_defs_ and os_defs_[name] == self.op.osparams_private[name]:
         del self.op.osparams_private[name]
 
-  def _CalculateFileStorageDir(self):
-    """Calculate final instance file storage dir.
-
-    """
-    # file storage dir calculation/check
-    self.instance_file_storage_dir = None
-    if self.op.disk_template in constants.DTS_FILEBASED:
-      # build the full file storage dir path
-      joinargs = []
-
-      cfg_storage = None
-      if self.op.disk_template == constants.DT_FILE:
-        cfg_storage = self.cfg.GetFileStorageDir()
-      elif self.op.disk_template == constants.DT_SHARED_FILE:
-        cfg_storage = self.cfg.GetSharedFileStorageDir()
-      elif self.op.disk_template == constants.DT_GLUSTER:
-        cfg_storage = self.cfg.GetGlusterStorageDir()
-
-      if not cfg_storage:
-        raise errors.OpPrereqError(
-          "Cluster file storage dir for {tpl} storage type not defined".format(
-            tpl=repr(self.op.disk_template)
-          ),
-          errors.ECODE_STATE
-      )
-
-      joinargs.append(cfg_storage)
-
-      if self.op.file_storage_dir is not None:
-        joinargs.append(self.op.file_storage_dir)
-
-      if self.op.disk_template != constants.DT_GLUSTER:
-        joinargs.append(self.op.instance_name)
-
-      if len(joinargs) > 1:
-        # pylint: disable=W0142
-        self.instance_file_storage_dir = utils.PathJoin(*joinargs)
-      else:
-        self.instance_file_storage_dir = joinargs[0]
-
   def CheckPrereq(self): # pylint: disable=R0914
     """Check prerequisites.
 
@@ -981,7 +946,7 @@ class LUInstanceCreate(LogicalUnit):
                                   utils.CommaJoin(owned_groups)),
                                  errors.ECODE_STATE)
 
-    self._CalculateFileStorageDir()
+    self.instance_file_storage_dir = CalculateFileStorageDir(self)
 
     if self.op.mode == constants.INSTANCE_IMPORT:
       export_info = self._ReadExportInfo()
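
The removed _CalculateFileStorageDir body now lives in instance_storage as CalculateFileStorageDir (see the import at the top of this diff) and is shared with LUInstanceSetParams below. A standalone sketch of the extracted helper, assuming it keeps taking the LU as its only argument, as the new call site suggests, and reusing the module's constants/errors/utils imports:

  def CalculateFileStorageDir(lu):
    """Sketch of the shared helper; mirrors the removed method above."""
    if lu.op.disk_template not in constants.DTS_FILEBASED:
      return None
    if lu.op.disk_template == constants.DT_FILE:
      cfg_storage = lu.cfg.GetFileStorageDir()
    elif lu.op.disk_template == constants.DT_SHARED_FILE:
      cfg_storage = lu.cfg.GetSharedFileStorageDir()
    elif lu.op.disk_template == constants.DT_GLUSTER:
      cfg_storage = lu.cfg.GetGlusterStorageDir()
    else:
      cfg_storage = None
    if not cfg_storage:
      raise errors.OpPrereqError("Cluster file storage dir for %r storage"
                                 " type not defined" % lu.op.disk_template,
                                 errors.ECODE_STATE)
    joinargs = [cfg_storage]
    if lu.op.file_storage_dir is not None:
      joinargs.append(lu.op.file_storage_dir)
    if lu.op.disk_template != constants.DT_GLUSTER:
      joinargs.append(lu.op.instance_name)
    if len(joinargs) > 1:
      return utils.PathJoin(*joinargs)
    return joinargs[0]
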
@@ -1049,7 +1014,7 @@ class LUInstanceCreate(LogicalUnit):
 
     # disk checks/pre-build
     default_vg = self.cfg.GetVGName()
-    self.disks = ComputeDisks(self.op, default_vg)
+    self.disks = ComputeDisks(self.op.disks, self.op.disk_template, default_vg)
 
     if self.op.mode == constants.INSTANCE_IMPORT:
       disk_images = []
@@ -1291,18 +1256,19 @@ class LUInstanceCreate(LogicalUnit):
     # Check disk access param to be compatible with specified hypervisor
     node_info = self.cfg.GetNodeInfo(self.op.pnode_uuid)
     node_group = self.cfg.GetNodeGroup(node_info.group)
-    disk_params = self.cfg.GetGroupDiskParams(node_group)
-    access_type = disk_params[self.op.disk_template].get(
+    group_disk_params = self.cfg.GetGroupDiskParams(node_group)
+    group_access_type = group_disk_params[self.op.disk_template].get(
       constants.RBD_ACCESS, constants.DISK_KERNELSPACE
     )
-
-    if not IsValidDiskAccessModeCombination(self.op.hypervisor,
-                                            self.op.disk_template,
-                                            access_type):
-      raise errors.OpPrereqError("Selected hypervisor (%s) cannot be"
-                                 " used with %s disk access param" %
-                                 (self.op.hypervisor, access_type),
-                                  errors.ECODE_STATE)
+    for dsk in self.disks:
+      access_type = dsk.get(constants.IDISK_ACCESS, group_access_type)
+      if not IsValidDiskAccessModeCombination(self.op.hypervisor,
+                                              self.op.disk_template,
+                                              access_type):
+        raise errors.OpPrereqError("Selected hypervisor (%s) cannot be"
+                                   " used with %s disk access param" %
+                                   (self.op.hypervisor, access_type),
+                                    errors.ECODE_STATE)
 
     # Verify instance specs
     spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
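
The access check is now applied per disk: each disk dict may override the node group's default via the IDISK_ACCESS key. The fallback semantics in isolation, with literal strings standing in for the constants:

  GROUP_DEFAULT = "kernelspace"  # i.e. constants.DISK_KERNELSPACE

  def effective_access(disk, group_access_type=GROUP_DEFAULT):
    """Per-disk access parameter, falling back to the group default."""
    return disk.get("access", group_access_type)  # constants.IDISK_ACCESS

  assert effective_access({}) == "kernelspace"
  assert effective_access({"access": "userspace"}) == "userspace"
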
@@ -1736,7 +1702,7 @@ class LUInstanceCreate(LogicalUnit):
     else:
       feedback_fn("* creating instance disks...")
       try:
-        CreateDisks(self, iobj, instance_disks=disks)
+        CreateDisks(self, iobj, disks=disks)
       except errors.OpExecError:
         self.LogWarning("Device creation failed")
         self.cfg.ReleaseDRBDMinors(instance_uuid)
@@ -1837,6 +1803,16 @@ class LUInstanceCreate(LogicalUnit):
 
     return self.cfg.GetNodeNames(list(self.cfg.GetInstanceNodes(iobj.uuid)))
 
+  def PrepareRetry(self, feedback_fn):
+    # A temporary lack of resources can only happen if opportunistic locking
+    # is used.
+    assert self.op.opportunistic_locking
+
+    logging.info("Opportunistic locking did not suceed, falling back to"
+                 " full lock allocation")
+    feedback_fn("* falling back to full lock allocation")
+    self.op.opportunistic_locking = False
+
 
 class LUInstanceRename(LogicalUnit):
   """Rename an instance.
@@ -1855,6 +1831,8 @@ class LUInstanceRename(LogicalUnit):
       raise errors.OpPrereqError("IP address check requires a name check",
                                  errors.ECODE_INVAL)
 
+    self._new_name_resolved = False
+
   def BuildHooksEnv(self):
     """Build hooks env.
 
@@ -1873,6 +1851,22 @@ class LUInstanceRename(LogicalUnit):
       list(self.cfg.GetInstanceNodes(self.instance.uuid))
     return (nl, nl)
 
+  def _PerformChecksAndResolveNewName(self):
+    """Checks and resolves the new name, storing the FQDN, if permitted.
+
+    """
+    if self._new_name_resolved or not self.op.name_check:
+      return
+
+    hostname = _CheckHostnameSane(self, self.op.new_name)
+    self.op.new_name = hostname.name
+    if (self.op.ip_check and
+        netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
+      raise errors.OpPrereqError("IP %s of instance %s already in use" %
+                                 (hostname.ip, self.op.new_name),
+                                 errors.ECODE_NOTUNIQUE)
+    self._new_name_resolved = True
+
   def CheckPrereq(self):
     """Check prerequisites.
 
@@ -1898,22 +1892,21 @@ class LUInstanceRename(LogicalUnit):
                        msg="cannot rename")
     self.instance = instance
 
-    new_name = self.op.new_name
-    if self.op.name_check:
-      hostname = _CheckHostnameSane(self, new_name)
-      new_name = self.op.new_name = hostname.name
-      if (self.op.ip_check and
-          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
-        raise errors.OpPrereqError("IP %s of instance %s already in use" %
-                                   (hostname.ip, new_name),
-                                   errors.ECODE_NOTUNIQUE)
+    self._PerformChecksAndResolveNewName()
 
-    if new_name != instance.name:
-      CheckInstanceExistence(self, new_name)
+    if self.op.new_name != instance.name:
+      CheckInstanceExistence(self, self.op.new_name)
 
   def ExpandNames(self):
     self._ExpandAndLockInstance()
 
+    # Note that this call might not resolve anything if name checks have been
+    # disabled in the opcode. In this case, we might have a renaming collision
+    # if a shortened name and a full name are used simultaneously, as we will
+    # have two different locks. However, at that point the user has explicitly
+    # disabled the checks that would have detected this issue.
+    self._PerformChecksAndResolveNewName()
+
     # Used to prevent instance namespace collisions.
     if self.op.new_name != self.op.instance_name:
       CheckInstanceExistence(self, self.op.new_name)
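
The name resolution is memoized through _new_name_resolved so that it can be triggered from both ExpandNames and CheckPrereq without resolving (or pinging) twice. The guard pattern in isolation, with a stand-in for the real FQDN lookup:

  class _ResolveOnce(object):
    """Sketch of the memoized-resolution pattern used by LUInstanceRename."""
    def __init__(self, new_name, name_check):
      self.new_name = new_name
      self.name_check = name_check
      self._resolved = False

    def resolve(self):
      if self._resolved or not self.name_check:
        return  # already resolved, or the user disabled name checks
      self.new_name = self.new_name.rstrip(".")  # stand-in for DNS lookup
      self._resolved = True
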
@@ -1992,6 +1985,8 @@ class LUInstanceRemove(LogicalUnit):
     self.needed_locks[locking.LEVEL_NODE] = []
     self.needed_locks[locking.LEVEL_NODE_RES] = []
     self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
+    self.dont_collate_locks[locking.LEVEL_NODE] = True
+    self.dont_collate_locks[locking.LEVEL_NODE_RES] = True
 
   def DeclareLocks(self, level):
     if level == locking.LEVEL_NODE:
@@ -2064,6 +2059,17 @@ class LUInstanceRemove(LogicalUnit):
 class LUInstanceMove(LogicalUnit):
   """Move an instance by data-copying.
 
+  This LU is only used if an instance needs to be moved by copying its data
+  from one node in the cluster to another: the instance is shut down, its
+  data is copied to the new node, the configuration change is propagated,
+  and the instance is started again.
+
+  See also:
+  L{LUInstanceFailover} for moving an instance on shared storage (no copying
+  required).
+
+  L{LUInstanceMigrate} for the live migration of an instance (no shutdown
+  required).
   """
   HPATH = "instance-move"
   HTYPE = constants.HTYPE_INSTANCE
@@ -2120,10 +2126,12 @@ class LUInstanceMove(LogicalUnit):
     assert self.instance is not None, \
       "Cannot retrieve locked instance %s" % self.op.instance_name
 
-    if self.instance.disk_template not in constants.DTS_COPYABLE:
-      raise errors.OpPrereqError("Disk template %s not suitable for copying" %
-                                 self.instance.disk_template,
-                                 errors.ECODE_STATE)
+    disks = self.cfg.GetInstanceDisks(self.instance.uuid)
+    for idx, dsk in enumerate(disks):
+      if dsk.dev_type not in constants.DTS_COPYABLE:
+        raise errors.OpPrereqError("Instance disk %d has disk type %s and is"
+                                   " not suitable for copying"
+                                   % (idx, dsk.dev_type), errors.ECODE_STATE)
 
     target_node = self.cfg.GetNodeInfo(self.op.target_node_uuid)
     assert target_node is not None, \
@@ -2138,13 +2146,6 @@ class LUInstanceMove(LogicalUnit):
     cluster = self.cfg.GetClusterInfo()
     bep = cluster.FillBE(self.instance)
 
-    disks = self.cfg.GetInstanceDisks(self.instance.uuid)
-    for idx, dsk in enumerate(disks):
-      if dsk.dev_type not in (constants.DT_PLAIN, constants.DT_FILE,
-                              constants.DT_SHARED_FILE, constants.DT_GLUSTER):
-        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
-                                   " cannot copy" % idx, errors.ECODE_STATE)
-
     CheckNodeOnline(self, target_node.uuid)
     CheckNodeNotDrained(self, target_node.uuid)
     CheckNodeVmCapable(self, target_node.uuid)
@@ -2359,7 +2360,9 @@ class LUInstanceMultiAlloc(NoHooksLU):
       else:
         node_whitelist = None
 
-      insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
+      insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op.disks,
+                                                            op.disk_template,
+                                                            default_vg),
                                            _ComputeNics(op, cluster, None,
                                                         self.cfg, ec_id),
                                            _ComputeFullBeParams(op, cluster),
@@ -2691,7 +2694,7 @@ class LUInstanceSetParams(LogicalUnit):
       else:
         raise errors.ProgrammerError("Unhandled operation '%s'" % op)
 
-  def _VerifyDiskModification(self, op, params, excl_stor):
+  def _VerifyDiskModification(self, op, params, excl_stor, group_access_type):
     """Verifies a disk modification.
 
     """
@@ -2714,6 +2717,17 @@ class LUInstanceSetParams(LogicalUnit):
 
       CheckSpindlesExclusiveStorage(params, excl_stor, True)
 
+      # Check the disk access param (only for templates that support it)
+      if self.instance.disk_template in constants.DTS_HAVE_ACCESS:
+        access_type = params.get(constants.IDISK_ACCESS, group_access_type)
+        if not IsValidDiskAccessModeCombination(self.instance.hypervisor,
+                                                self.instance.disk_template,
+                                                access_type):
+          raise errors.OpPrereqError("Selected hypervisor (%s) cannot be"
+                                     " used with %s disk access param" %
+                                     (self.instance.hypervisor, access_type),
+                                      errors.ECODE_STATE)
+
     elif op == constants.DDM_MODIFY:
       if constants.IDISK_SIZE in params:
         raise errors.OpPrereqError("Disk size change not possible, use"
@@ -2723,6 +2737,11 @@ class LUInstanceSetParams(LogicalUnit):
       # Changing arbitrary parameters is allowed only for ext disk template
       if self.instance.disk_template != constants.DT_EXT:
         utils.ForceDictType(params, constants.MODIFIABLE_IDISK_PARAMS_TYPES)
+      else:
+        # The 'access' parameter cannot be modified
+        if constants.IDISK_ACCESS in params:
+          raise errors.OpPrereqError("Disk 'access' parameter change is"
+                                     " not possible", errors.ECODE_INVAL)
 
       name = params.get(constants.IDISK_NAME, None)
       if name is not None and name.lower() == constants.VALUE_NONE:
@@ -2796,17 +2815,33 @@ class LUInstanceSetParams(LogicalUnit):
     self.op.nics = self._UpgradeDiskNicMods(
       "NIC", self.op.nics, ht.TSetParamsMods(ht.TINicParams))
 
-    if self.op.disks and self.op.disk_template is not None:
-      raise errors.OpPrereqError("Disk template conversion and other disk"
-                                 " changes not supported at the same time",
-                                 errors.ECODE_INVAL)
+    # Check disk template modifications
+    if self.op.disk_template:
+      if self.op.disks:
+        raise errors.OpPrereqError("Disk template conversion and other disk"
+                                   " changes not supported at the same time",
+                                   errors.ECODE_INVAL)
 
-    if (self.op.disk_template and
-        self.op.disk_template in constants.DTS_INT_MIRROR and
-        self.op.remote_node is None):
-      raise errors.OpPrereqError("Changing the disk template to a mirrored"
-                                 " one requires specifying a secondary node",
-                                 errors.ECODE_INVAL)
+      # mirrored template node checks
+      if self.op.disk_template in constants.DTS_INT_MIRROR:
+        if not self.op.remote_node:
+          raise errors.OpPrereqError("Changing the disk template to a mirrored"
+                                     " one requires specifying a secondary"
+                                     " node", errors.ECODE_INVAL)
+      elif self.op.remote_node:
+        self.LogWarning("Changing the disk template to a non-mirrored one,"
+                        " the secondary node will be ignored")
+        # the secondary node must be cleared in order to be ignored, otherwise
+        # the operation will fail, in the GenerateDiskTemplate method
+        self.op.remote_node = None
+
+      # file-based template checks
+      if self.op.disk_template in constants.DTS_FILEBASED:
+        if not self.op.file_driver:
+          self.op.file_driver = constants.FD_DEFAULT
+        elif self.op.file_driver not in constants.FILE_DRIVER:
+          raise errors.OpPrereqError("Invalid file driver name '%s'" %
+                                     self.op.file_driver, errors.ECODE_INVAL)
 
     # Check NIC modifications
     self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
@@ -2826,6 +2861,9 @@ class LUInstanceSetParams(LogicalUnit):
     self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
     # Look node group to look up the ipolicy
     self.share_locks[locking.LEVEL_NODEGROUP] = 1
+    self.dont_collate_locks[locking.LEVEL_NODEGROUP] = True
+    self.dont_collate_locks[locking.LEVEL_NODE] = True
+    self.dont_collate_locks[locking.LEVEL_NODE_RES] = True
 
   def DeclareLocks(self, level):
     if level == locking.LEVEL_NODEGROUP:
@@ -2937,9 +2975,10 @@ class LUInstanceSetParams(LogicalUnit):
 
     elif new_mode == constants.NIC_MODE_ROUTED:
       ip = params.get(constants.INIC_IP, old_ip)
-      if ip is None:
+      if ip is None and not new_net_uuid:
         raise errors.OpPrereqError("Cannot set the NIC IP address to None"
-                                   " on a routed NIC", errors.ECODE_INVAL)
+                                   " on a routed NIC if not attached to a"
+                                   " network", errors.ECODE_INVAL)
 
     elif new_mode == constants.NIC_MODE_OVS:
       # TODO: check OVS link
@@ -3035,23 +3074,46 @@ class LUInstanceSetParams(LogicalUnit):
     """CheckPrereq checks related to a new disk template."""
     # Arguments are passed to avoid configuration lookups
     pnode_uuid = self.instance.primary_node
-    if self.instance.disk_template == self.op.disk_template:
+
+    if self.instance.disk_template in constants.DTS_NOT_CONVERTIBLE_FROM:
+      raise errors.OpPrereqError("Conversion from the '%s' disk template is"
+                                 " not supported" % self.instance.disk_template,
+                                 errors.ECODE_INVAL)
+
+    elif self.op.disk_template in constants.DTS_NOT_CONVERTIBLE_TO:
+      raise errors.OpPrereqError("Conversion to the '%s' disk template is"
+                                 " not supported" % self.op.disk_template,
+                                 errors.ECODE_INVAL)
+
+    if (self.op.disk_template != constants.DT_EXT and
+        self.instance.disk_template == self.op.disk_template):
       raise errors.OpPrereqError("Instance already has disk template %s" %
                                  self.instance.disk_template,
                                  errors.ECODE_INVAL)
 
     if not self.cluster.IsDiskTemplateEnabled(self.op.disk_template):
+      enabled_dts = utils.CommaJoin(self.cluster.enabled_disk_templates)
       raise errors.OpPrereqError("Disk template '%s' is not enabled for this"
-                                 " cluster." % self.op.disk_template)
+                                 " cluster (enabled templates: %s)" %
+                                 (self.op.disk_template, enabled_dts),
+                                  errors.ECODE_STATE)
+
+    default_vg = self.cfg.GetVGName()
+    if (not default_vg and
+        self.op.disk_template not in constants.DTS_NOT_LVM):
+      raise errors.OpPrereqError("Disk template conversions to lvm-based"
+                                 " instances are not supported by the cluster",
+                                 errors.ECODE_STATE)
 
-    if (self.instance.disk_template,
-        self.op.disk_template) not in self._DISK_CONVERSIONS:
-      raise errors.OpPrereqError("Unsupported disk template conversion from"
-                                 " %s to %s" % (self.instance.disk_template,
-                                                self.op.disk_template),
-                                 errors.ECODE_INVAL)
     CheckInstanceState(self, self.instance, INSTANCE_DOWN,
                        msg="cannot change disk template")
+
+    # compute new disks' information
+    inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
+    self.disks_info = ComputeDisksInfo(inst_disks, self.op.disk_template,
+                                       default_vg, self.op.ext_params)
+
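
ComputeDisksInfo replaces the ad-hoc "fake disk info" dicts that _ConvertPlainToDrbd used to build inline (see the removed block further down). A sketch of the per-disk dicts it is assumed to produce; the real function also takes the target template and ext_params, as the call above shows:

  def compute_disks_info_sketch(inst_disks, default_vg):
    """Assumed output shape only, modeled on the removed inline code."""
    return [{"size": d.size,    # constants.IDISK_SIZE
             "mode": d.mode,    # constants.IDISK_MODE
             "vg": d.logical_id[0] if d.logical_id else default_vg,
             "name": d.name}    # constants.IDISK_NAME
            for d in inst_disks]
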
+    # mirror node verification
     if self.op.disk_template in constants.DTS_INT_MIRROR:
       if self.op.remote_node_uuid == pnode_uuid:
         raise errors.OpPrereqError("Given new secondary node %s is the same"
@@ -3059,13 +3121,7 @@ class LUInstanceSetParams(LogicalUnit):
                                    self.op.remote_node, errors.ECODE_STATE)
       CheckNodeOnline(self, self.op.remote_node_uuid)
       CheckNodeNotDrained(self, self.op.remote_node_uuid)
-      # FIXME: here we assume that the old instance type is DT_PLAIN
-      assert self.instance.disk_template == constants.DT_PLAIN
-      disks = [{constants.IDISK_SIZE: d.size,
-                constants.IDISK_VG: d.logical_id[0]}
-               for d in self.cfg.GetInstanceDisks(self.instance.uuid)]
-      required = ComputeDiskSizePerVG(self.op.disk_template, disks)
-      CheckNodesFreeDiskPerVG(self, [self.op.remote_node_uuid], required)
+      CheckNodeVmCapable(self, self.op.remote_node_uuid)
 
       snode_info = self.cfg.GetNodeInfo(self.op.remote_node_uuid)
       snode_group = self.cfg.GetNodeGroup(snode_info.group)
@@ -3079,6 +3135,17 @@ class LUInstanceSetParams(LogicalUnit):
                         " from the first disk's node group will be"
                         " used")
 
+    # check that the template is in the primary node group's allowed templates
+    pnode_group = self.cfg.GetNodeGroup(pnode_info.group)
+    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
+                                                            pnode_group)
+    allowed_dts = ipolicy[constants.IPOLICY_DTS]
+    if self.op.disk_template not in allowed_dts:
+      raise errors.OpPrereqError("Disk template '%s' in not allowed (allowed"
+                                 " templates: %s)" % (self.op.disk_template,
+                                 utils.CommaJoin(allowed_dts)),
+                                 errors.ECODE_STATE)
+
     if not self.op.disk_template in constants.DTS_EXCL_STORAGE:
       # Make sure none of the nodes require exclusive storage
       nodes = [pnode_info]
@@ -3092,6 +3159,35 @@ class LUInstanceSetParams(LogicalUnit):
                                            self.op.disk_template))
         raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)
 
+    # node capacity checks
+    if (self.op.disk_template == constants.DT_PLAIN and
+        self.instance.disk_template == constants.DT_DRBD8):
+      # we ensure that no capacity checks will be made for conversions from
+      # the 'drbd' to the 'plain' disk template
+      pass
+    elif (self.op.disk_template == constants.DT_DRBD8 and
+          self.instance.disk_template == constants.DT_PLAIN):
+      # for conversions from the 'plain' to the 'drbd' disk template, check
+      # only the remote node's capacity
+      req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks_info)
+      CheckNodesFreeDiskPerVG(self, [self.op.remote_node_uuid], req_sizes)
+    elif self.op.disk_template in constants.DTS_LVM:
+      # remaining lvm-based capacity checks
+      node_uuids = [pnode_uuid]
+      if self.op.remote_node_uuid:
+        node_uuids.append(self.op.remote_node_uuid)
+      req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks_info)
+      CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes)
+    elif self.op.disk_template == constants.DT_RBD:
+      # CheckRADOSFreeSpace() is simply a placeholder
+      CheckRADOSFreeSpace()
+    elif self.op.disk_template == constants.DT_EXT:
+      # FIXME: Capacity checks for the extstorage template, if they exist
+      pass
+    else:
+      # FIXME: Checks for other non-lvm-based disk templates
+      pass
+
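
The capacity checks above reduce to a dispatch on the (old, new) template pair; in sketch form, with template names as literals:

  def capacity_check_nodes(old_tpl, new_tpl, pnode, snode):
    """Which nodes need a free-space check for this conversion (sketch)."""
    if (old_tpl, new_tpl) == ("drbd", "plain"):
      return []  # the footprint shrinks, nothing to check
    if (old_tpl, new_tpl) == ("plain", "drbd"):
      return [snode]  # only the new secondary needs space
    if new_tpl in ("plain", "drbd"):  # remaining lvm-based targets
      return [pnode] + ([snode] if snode else [])
    return []  # rbd/ext/file-based: no LVM free-space check here
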
   def _PreCheckDisks(self, ispec):
     """CheckPrereq checks related to disk changes.
 
@@ -3106,9 +3202,18 @@ class LUInstanceSetParams(LogicalUnit):
       rpc.GetExclusiveStorageForNodes(self.cfg, inst_nodes).values()
       )
 
+    # Get the group access type
+    node_info = self.cfg.GetNodeInfo(self.instance.primary_node)
+    node_group = self.cfg.GetNodeGroup(node_info.group)
+    group_disk_params = self.cfg.GetGroupDiskParams(node_group)
+    group_access_type = group_disk_params[self.instance.disk_template].get(
+      constants.RBD_ACCESS, constants.DISK_KERNELSPACE
+    )
+
     # Check disk modifications. This is done here and not in CheckArguments
     # (as with NICs), because we need to know the instance's disk template
-    ver_fn = lambda op, par: self._VerifyDiskModification(op, par, excl_stor)
+    ver_fn = lambda op, par: self._VerifyDiskModification(op, par, excl_stor,
+                                                          group_access_type)
     if self.instance.disk_template == constants.DT_EXT:
       self._CheckMods("disk", self.op.disks, {}, ver_fn)
     else:
@@ -3289,7 +3394,7 @@ class LUInstanceSetParams(LogicalUnit):
       if result.fail_msg:
         if self.op.hotplug:
           result.Raise("Hotplug is not possible: %s" % result.fail_msg,
-                       prereq=True)
+                       prereq=True, ecode=errors.ECODE_STATE)
         else:
           self.LogWarning(result.fail_msg)
           self.op.hotplug = False
@@ -3314,6 +3419,7 @@ class LUInstanceSetParams(LogicalUnit):
 
     if self.op.disk_template:
       self._PreCheckDiskTemplate(pnode_info)
+      self.instance_file_storage_dir = CalculateFileStorageDir(self)
 
     self._PreCheckDisks(ispec)
 
@@ -3368,7 +3474,7 @@ class LUInstanceSetParams(LogicalUnit):
                                    errors.ECODE_INVAL)
 
       # Only perform this test if a new CPU mask is given
-      if constants.HV_CPU_MASK in self.hv_new:
+      if constants.HV_CPU_MASK in self.hv_new and cpu_list:
         # Calculate the largest CPU number requested
         max_requested_cpu = max(map(max, cpu_list))
         # Check that all of the instance's nodes have enough physical CPUs to
@@ -3591,25 +3697,158 @@ class LUInstanceSetParams(LogicalUnit):
                 utils.CommaJoin(set(res_max + res_min))))
         raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
 
+  def _ConvertInstanceTemplate(self, feedback_fn):
+    """Converts the disk template of an instance.
+
+    This function converts the disk template of an instance. It supports
+    conversions among all the available disk templates except conversions
+    between the LVM-based disk templates, which use their own separate code
+    path. Also, this method does not support conversions that include the
+    'diskless' template, nor those targeting the 'blockdev' template.
+
+    @type feedback_fn: callable
+    @param feedback_fn: function used to send feedback back to the caller
+
+    @rtype: NoneType
+    @return: None
+    @raise errors.OpPrereqError: in case of failure
+
+    """
+    template_info = self.op.disk_template
+    if self.op.disk_template == constants.DT_EXT:
+      template_info = ":".join([self.op.disk_template,
+                                self.op.ext_params["provider"]])
+
+    feedback_fn("Converting disk template from '%s' to '%s'" %
+                (self.instance.disk_template, template_info))
+
+    assert not (self.instance.disk_template in
+                constants.DTS_NOT_CONVERTIBLE_FROM or
+                self.op.disk_template in constants.DTS_NOT_CONVERTIBLE_TO), \
+      ("Unsupported disk template conversion from '%s' to '%s'" %
+       (self.instance.disk_template, self.op.disk_template))
+
+    pnode_uuid = self.instance.primary_node
+    snode_uuid = []
+    if self.op.remote_node_uuid:
+      snode_uuid = [self.op.remote_node_uuid]
+
+    old_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
+
+    feedback_fn("Generating new '%s' disk template..." % template_info)
+    new_disks = GenerateDiskTemplate(self,
+                                     self.op.disk_template,
+                                     self.instance.uuid,
+                                     pnode_uuid,
+                                     snode_uuid,
+                                     self.disks_info,
+                                     self.instance_file_storage_dir,
+                                     self.op.file_driver,
+                                     0,
+                                     feedback_fn,
+                                     self.diskparams)
+
+    # Create the new block devices for the instance.
+    feedback_fn("Creating new empty disks of type '%s'..." % template_info)
+    try:
+      CreateDisks(self, self.instance, disk_template=self.op.disk_template,
+                  disks=new_disks)
+    except errors.OpExecError:
+      self.LogWarning("Device creation failed")
+      self.cfg.ReleaseDRBDMinors(self.instance.uuid)
+      raise
+
+    # Transfer the data from the old to the newly created disks of the
+    # instance.
+    feedback_fn("Populating the new empty disks of type '%s'..." %
+                template_info)
+    for idx, (old, new) in enumerate(zip(old_disks, new_disks)):
+      feedback_fn(" - copying data from disk %s (%s), size %s" %
+                  (idx, self.instance.disk_template,
+                   utils.FormatUnit(new.size, "h")))
+      if self.instance.disk_template == constants.DT_DRBD8:
+        old = old.children[0]
+      result = self.rpc.call_blockdev_convert(pnode_uuid, (old, self.instance),
+                                              (new, self.instance))
+      msg = result.fail_msg
+      if msg:
+        # A disk failed to copy. Abort the conversion operation and roll back
+        # the modifications to the previous state. The instance will remain
+        # intact.
+        if self.op.disk_template == constants.DT_DRBD8:
+          new = new.children[0]
+        self.Log(" - ERROR: Could not copy disk '%s' to '%s'" %
+                 (old.logical_id[1], new.logical_id[1]))
+        try:
+          self.LogInfo("Some disks failed to copy")
+          self.LogInfo("The instance will not be affected, aborting operation")
+          self.LogInfo("Removing newly created disks of type '%s'..." %
+                       template_info)
+          RemoveDisks(self, self.instance, disk_template=self.op.disk_template,
+                      disks=new_disks)
+          self.LogInfo("Newly created disks removed successfully")
+        finally:
+          self.cfg.ReleaseDRBDMinors(self.instance.uuid)
+          result.Raise("Error while converting the instance's template")
+
+    # In case of DRBD disks, return their ports to the pool
+    if self.instance.disk_template == constants.DT_DRBD8:
+      for disk in old_disks:
+        tcp_port = disk.logical_id[2]
+        self.cfg.AddTcpUdpPort(tcp_port)
+
+    # Remove old disks from the instance.
+    feedback_fn("Detaching old disks (%s) from the instance and removing"
+                " them from cluster config" % self.instance.disk_template)
+    for old_disk in old_disks:
+      self.cfg.RemoveInstanceDisk(self.instance.uuid, old_disk.uuid)
+
+    # The old disk_template will be needed to remove the old block devices.
+    old_disk_template = self.instance.disk_template
+
+    # Update the disk template of the instance
+    self.cfg.SetInstanceDiskTemplate(self.instance.uuid, self.op.disk_template)
+
+    # Attach the new disks to the instance.
+    feedback_fn("Adding new disks (%s) to cluster config and attaching"
+                " them to the instance" % template_info)
+    for (idx, new_disk) in enumerate(new_disks):
+      self.cfg.AddInstanceDisk(self.instance.uuid, new_disk, idx=idx)
+
+    # Re-read the instance from the configuration.
+    self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
+
+    # Release node locks while waiting for sync and disks removal.
+    ReleaseLocks(self, locking.LEVEL_NODE)
+
+    disk_abort = not WaitForSync(self, self.instance,
+                                 oneshot=not self.op.wait_for_sync)
+    if disk_abort:
+      raise errors.OpExecError("There are some degraded disks for"
+                               " this instance, please cleanup manually")
+
+    feedback_fn("Removing old block devices of type '%s'..." %
+                old_disk_template)
+    RemoveDisks(self, self.instance, disk_template=old_disk_template,
+                disks=old_disks)
+
+    # Node resource locks will be released by the caller.
+
   def _ConvertPlainToDrbd(self, feedback_fn):
     """Converts an instance from plain to drbd.
 
     """
-    feedback_fn("Converting template to drbd")
+    feedback_fn("Converting disk template from 'plain' to 'drbd'")
+
     pnode_uuid = self.instance.primary_node
     snode_uuid = self.op.remote_node_uuid
 
     assert self.instance.disk_template == constants.DT_PLAIN
 
     old_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
-    # create a fake disk info for _GenerateDiskTemplate
-    disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
-                  constants.IDISK_VG: d.logical_id[0],
-                  constants.IDISK_NAME: d.name}
-                 for d in old_disks]
     new_disks = GenerateDiskTemplate(self, self.op.disk_template,
                                      self.instance.uuid, pnode_uuid,
-                                     [snode_uuid], disk_info, None, None, 0,
+                                     [snode_uuid], self.disks_info,
+                                     None, None, 0,
                                      feedback_fn, self.diskparams)
     anno_disks = rpc.AnnotateDiskParams(new_disks, self.diskparams)
     p_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, pnode_uuid)
@@ -3654,10 +3893,8 @@ class LUInstanceSetParams(LogicalUnit):
     for old_disk in old_disks:
       self.cfg.RemoveInstanceDisk(self.instance.uuid, old_disk.uuid)
 
-    # Update instance structure
-    self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
-    self.instance.disk_template = constants.DT_DRBD8
-    self.cfg.Update(self.instance, feedback_fn)
+    # Update the disk template of the instance
+    self.cfg.SetInstanceDiskTemplate(self.instance.uuid, constants.DT_DRBD8)
 
     # Attach the new disks to the instance
     for (idx, new_disk) in enumerate(new_disks):
@@ -3687,14 +3924,12 @@ class LUInstanceSetParams(LogicalUnit):
     assert self.instance.disk_template == constants.DT_DRBD8
     assert len(secondary_nodes) == 1 or not self.instance.disks
 
-    pnode_uuid = self.instance.primary_node
-
     # it will not be possible to calculate the snode_uuid later
     snode_uuid = None
     if secondary_nodes:
       snode_uuid = secondary_nodes[0]
 
-    feedback_fn("Converting template to plain")
+    feedback_fn("Converting disk template from 'drbd' to 'plain'")
 
     disks = self.cfg.GetInstanceDisks(self.instance.uuid)
     old_disks = AnnotateDiskParams(self.instance, disks, self.cfg)
@@ -3707,7 +3942,6 @@ class LUInstanceSetParams(LogicalUnit):
       child.name = parent.name
 
     # this is a DRBD disk, return its port to the pool
-    # NOTE: this must be done right before the call to cfg.Update!
     for disk in old_disks:
       tcp_port = disk.logical_id[2]
       self.cfg.AddTcpUdpPort(tcp_port)
@@ -3716,10 +3950,8 @@ class LUInstanceSetParams(LogicalUnit):
     for old_disk in old_disks:
       self.cfg.RemoveInstanceDisk(self.instance.uuid, old_disk.uuid)
 
-    # Update instance structure
-    self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
-    self.instance.disk_template = constants.DT_PLAIN
-    self.cfg.Update(self.instance, feedback_fn)
+    # Update the disk template of the instance
+    self.cfg.SetInstanceDiskTemplate(self.instance.uuid, constants.DT_PLAIN)
 
     # Attach the new disks to the instance
     for (idx, new_disk) in enumerate(new_disks):
@@ -3732,21 +3964,15 @@ class LUInstanceSetParams(LogicalUnit):
     ReleaseLocks(self, locking.LEVEL_NODE)
 
     feedback_fn("Removing volumes on the secondary node...")
-    for disk in old_disks:
-      result = self.rpc.call_blockdev_remove(snode_uuid, (disk, self.instance))
-      result.Warn("Could not remove block device %s on node %s,"
-                  " continuing anyway" %
-                  (disk.iv_name, self.cfg.GetNodeName(snode_uuid)),
-                  self.LogWarning)
+    RemoveDisks(self, self.instance, disk_template=constants.DT_DRBD8,
+                disks=old_disks, target_node_uuid=snode_uuid)
 
     feedback_fn("Removing unneeded volumes on the primary node...")
+    meta_disks = []
     for idx, disk in enumerate(old_disks):
-      meta = disk.children[1]
-      result = self.rpc.call_blockdev_remove(pnode_uuid, (meta, self.instance))
-      result.Warn("Could not remove metadata for disk %d on node %s,"
-                  " continuing anyway" %
-                  (idx, self.cfg.GetNodeName(pnode_uuid)),
-                  self.LogWarning)
+      meta_disks.append(disk.children[1])
+    RemoveDisks(self, self.instance, disk_template=constants.DT_DRBD8,
+                disks=meta_disks)
 
   def _HotplugDevice(self, action, dev_type, device, extra, seq):
     self.LogInfo("Trying to hotplug device...")
@@ -3870,15 +4096,7 @@ class LUInstanceSetParams(LogicalUnit):
                                    root, None, idx)
       ShutdownInstanceDisks(self, self.instance, [root])
 
-    (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
-    for node_uuid, disk in anno_disk.ComputeNodeTree(
-                             self.instance.primary_node):
-      msg = self.rpc.call_blockdev_remove(node_uuid, (disk, self.instance)) \
-              .fail_msg
-      if msg:
-        self.LogWarning("Could not remove disk/%d on node '%s': %s,"
-                        " continuing anyway", idx,
-                        self.cfg.GetNodeName(node_uuid), msg)
+    RemoveDisks(self, self.instance, disks=[root])
 
     # if this is a DRBD disk, return its port to the pool
     if root.dev_type in constants.DTS_DRBD:
@@ -4013,7 +4231,10 @@ class LUInstanceSetParams(LogicalUnit):
                                  " proceed with disk template conversion")
       mode = (self.instance.disk_template, self.op.disk_template)
       try:
-        self._DISK_CONVERSIONS[mode](self, feedback_fn)
+        if mode in self._DISK_CONVERSIONS:
+          self._DISK_CONVERSIONS[mode](self, feedback_fn)
+        else:
+          self._ConvertInstanceTemplate(feedback_fn)
       except:
         self.cfg.ReleaseDRBDMinors(self.instance.uuid)
         raise
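
Finally, Exec keeps the special-cased lvm conversions in the _DISK_CONVERSIONS table and falls back to the new generic path for every other pair; the dispatch shape in isolation:

  def dispatch_conversion(conversions, mode, generic_fn, feedback_fn):
    """Sketch: table lookup with a generic fallback, as in Exec above."""
    fn = conversions.get(mode)
    if fn is not None:
      fn(feedback_fn)  # e.g. ("plain", "drbd") -> _ConvertPlainToDrbd
    else:
      generic_fn(feedback_fn)  # everything else -> _ConvertInstanceTemplate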