Add default file_driver if missing
diff --git a/lib/cmdlib/instance.py b/lib/cmdlib/instance.py
index 0b1966d..e922800 100644
--- a/lib/cmdlib/instance.py
+++ b/lib/cmdlib/instance.py
@@ -51,11 +51,10 @@ from ganeti.cmdlib.common import INSTANCE_DOWN, \
   AnnotateDiskParams, GetUpdatedParams, ExpandInstanceName, \
   ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeName
 from ganeti.cmdlib.instance_storage import CreateDisks, \
-  CheckNodesFreeDiskPerVG, WipeDisks, WaitForSync, \
+  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
   IsExclusiveStorageEnabledNodeName, CreateSingleBlockDev, ComputeDisks, \
   CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
-  CreateBlockDev, StartInstanceDisks, ShutdownInstanceDisks, \
-  AssembleInstanceDisks
+  StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks
 from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
   GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
   NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
@@ -405,6 +404,12 @@ class LUInstanceCreate(LogicalUnit):
       raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                  self.op.file_driver, errors.ECODE_INVAL)
 
+    # set default file_driver if unset and required
+    if (not self.op.file_driver and
+        self.op.disk_template in [constants.DT_FILE,
+                                  constants.DT_SHARED_FILE]):
+      self.op.file_driver = constants.FD_DEFAULT
+
     if self.op.disk_template == constants.DT_FILE:
       opcodes.RequireFileStorage()
     elif self.op.disk_template == constants.DT_SHARED_FILE:
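
The hunk above is the change named in the commit subject: when a file-based disk template is used and the opcode carries no file driver, fall back to the cluster default. A minimal standalone sketch of the rule, with the relevant ganeti.constants values inlined as assumptions (FD_DEFAULT being the loop driver):

    # Literal values stand in for constants.DT_FILE, constants.DT_SHARED_FILE
    # and constants.FD_DEFAULT (assumed to be "loop").
    DT_FILE, DT_SHARED_FILE, FD_DEFAULT = "file", "sharedfile", "loop"

    def resolve_file_driver(file_driver, disk_template):
        # Only file-based templates need a driver; others pass through.
        if not file_driver and disk_template in (DT_FILE, DT_SHARED_FILE):
            return FD_DEFAULT
        return file_driver
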
@@ -519,7 +524,6 @@ class LUInstanceCreate(LogicalUnit):
 
       if self.op.opportunistic_locking:
         self.opportunistic_locks[locking.LEVEL_NODE] = True
-        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
     else:
       self.op.pnode = ExpandNodeName(self.cfg, self.op.pnode)
       nodelist = [self.op.pnode]
@@ -555,6 +559,14 @@ class LUInstanceCreate(LogicalUnit):
     self.needed_locks[locking.LEVEL_NODE_RES] = \
       CopyLockList(self.needed_locks[locking.LEVEL_NODE])
 
+  def DeclareLocks(self, level):
+    if level == locking.LEVEL_NODE_RES and \
+      self.opportunistic_locks[locking.LEVEL_NODE]:
+      # Even when using opportunistic locking, we require the same set of
+      # NODE_RES locks as the NODE locks we obtained
+      self.needed_locks[locking.LEVEL_NODE_RES] = \
+        self.owned_locks(locking.LEVEL_NODE)
+
   def _RunAllocator(self):
     """Run the allocator based on input opcode.
 
@@ -621,8 +633,9 @@ class LUInstanceCreate(LogicalUnit):
       vcpus=self.be_full[constants.BE_VCPUS],
       nics=NICListToTuple(self, self.nics),
       disk_template=self.op.disk_template,
-      disks=[(d[constants.IDISK_NAME], d[constants.IDISK_SIZE],
-              d[constants.IDISK_MODE]) for d in self.disks],
+      disks=[(d[constants.IDISK_NAME], d.get("uuid", ""),
+              d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
+             for d in self.disks],
       bep=self.be_full,
       hvp=self.hv_full,
       hypervisor_name=self.op.hypervisor,
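
The disk tuples passed into the hook environment gain a UUID field. Roughly, with a toy dict standing in for the real IDISK_* parameters:

    # before: (name, size, mode)
    # after:  (name, uuid, size, mode), "" when no UUID is assigned yet
    d = {"name": "disk0", "size": 1024, "mode": "rw"}
    entry = (d["name"], d.get("uuid", ""), d["size"], d["mode"])
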
@@ -1198,6 +1211,7 @@ class LUInstanceCreate(LogicalUnit):
                             primary_node=pnode_name,
                             nics=self.nics, disks=disks,
                             disk_template=self.op.disk_template,
+                            disks_active=False,
                             admin_state=constants.ADMINST_DOWN,
                             network_port=network_port,
                             beparams=self.op.beparams,
@@ -1277,6 +1291,9 @@ class LUInstanceCreate(LogicalUnit):
       raise errors.OpExecError("There are some degraded disks for"
                                " this instance")
 
+    # instance disks are now active
+    iobj.disks_active = True
+
     # Release all node resource locks
     ReleaseLocks(self, locking.LEVEL_NODE_RES)
 
@@ -1827,9 +1844,9 @@ class LUInstanceMultiAlloc(NoHooksLU):
                                  " pnode/snode while others do not",
                                  errors.ECODE_INVAL)
 
-    if self.op.iallocator is None:
+    if not has_nodes and self.op.iallocator is None:
       default_iallocator = self.cfg.GetDefaultIAllocator()
-      if default_iallocator and has_nodes:
+      if default_iallocator:
         self.op.iallocator = default_iallocator
       else:
         raise errors.OpPrereqError("No iallocator or nodes on the instances"
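
The guard above fixes an inverted condition: previously, when no iallocator was given, the cluster default was applied only if nodes *were* specified (exactly the case that needs no allocator), and the error was raised even for fully node-specified requests without a default. The intended resolution order, as a sketch:

    def resolve_iallocator(has_nodes, op_iallocator, cluster_default):
        if has_nodes:            # explicit pnode/snode: no allocator needed
            return None
        if op_iallocator:        # allocator named on the opcode itself
            return op_iallocator
        if cluster_default:      # otherwise fall back to the cluster default
            return cluster_default
        raise ValueError("no iallocator and no nodes given")
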
@@ -1863,7 +1880,6 @@ class LUInstanceMultiAlloc(NoHooksLU):
 
       if self.op.opportunistic_locking:
         self.opportunistic_locks[locking.LEVEL_NODE] = True
-        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
     else:
       nodeslist = []
       for inst in self.op.instances:
@@ -1878,39 +1894,48 @@ class LUInstanceMultiAlloc(NoHooksLU):
       # prevent accidental modification)
       self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)
 
+  def DeclareLocks(self, level):
+    if level == locking.LEVEL_NODE_RES and \
+      self.opportunistic_locks[locking.LEVEL_NODE]:
+      # Even when using opportunistic locking, we require the same set of
+      # NODE_RES locks as the NODE locks we obtained
+      self.needed_locks[locking.LEVEL_NODE_RES] = \
+        self.owned_locks(locking.LEVEL_NODE)
+
   def CheckPrereq(self):
     """Check prerequisite.
 
     """
-    cluster = self.cfg.GetClusterInfo()
-    default_vg = self.cfg.GetVGName()
-    ec_id = self.proc.GetECId()
+    if self.op.iallocator:
+      cluster = self.cfg.GetClusterInfo()
+      default_vg = self.cfg.GetVGName()
+      ec_id = self.proc.GetECId()
 
-    if self.op.opportunistic_locking:
-      # Only consider nodes for which a lock is held
-      node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
-    else:
-      node_whitelist = None
+      if self.op.opportunistic_locking:
+        # Only consider nodes for which a lock is held
+        node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
+      else:
+        node_whitelist = None
 
-    insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
-                                         _ComputeNics(op, cluster, None,
-                                                      self.cfg, ec_id),
-                                         _ComputeFullBeParams(op, cluster),
-                                         node_whitelist)
-             for op in self.op.instances]
+      insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
+                                           _ComputeNics(op, cluster, None,
+                                                        self.cfg, ec_id),
+                                           _ComputeFullBeParams(op, cluster),
+                                           node_whitelist)
+               for op in self.op.instances]
 
-    req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
-    ial = iallocator.IAllocator(self.cfg, self.rpc, req)
+      req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
+      ial = iallocator.IAllocator(self.cfg, self.rpc, req)
 
-    ial.Run(self.op.iallocator)
+      ial.Run(self.op.iallocator)
 
-    if not ial.success:
-      raise errors.OpPrereqError("Can't compute nodes using"
-                                 " iallocator '%s': %s" %
-                                 (self.op.iallocator, ial.info),
-                                 errors.ECODE_NORES)
+      if not ial.success:
+        raise errors.OpPrereqError("Can't compute nodes using"
+                                   " iallocator '%s': %s" %
+                                   (self.op.iallocator, ial.info),
+                                   errors.ECODE_NORES)
 
-    self.ia_result = ial.result
+      self.ia_result = ial.result
 
     if self.op.dry_run:
       self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
@@ -1921,34 +1946,43 @@ class LUInstanceMultiAlloc(NoHooksLU):
     """Contructs the partial result.
 
     """
-    (allocatable, failed) = self.ia_result
+    if self.op.iallocator:
+      (allocatable, failed_insts) = self.ia_result
+      allocatable_insts = map(compat.fst, allocatable)
+    else:
+      allocatable_insts = [op.instance_name for op in self.op.instances]
+      failed_insts = []
+
     return {
-      opcodes.OpInstanceMultiAlloc.ALLOCATABLE_KEY:
-        map(compat.fst, allocatable),
-      opcodes.OpInstanceMultiAlloc.FAILED_KEY: failed,
+      opcodes.OpInstanceMultiAlloc.ALLOCATABLE_KEY: allocatable_insts,
+      opcodes.OpInstanceMultiAlloc.FAILED_KEY: failed_insts,
       }
 
   def Exec(self, feedback_fn):
     """Executes the opcode.
 
     """
-    op2inst = dict((op.instance_name, op) for op in self.op.instances)
-    (allocatable, failed) = self.ia_result
-
     jobs = []
-    for (name, nodes) in allocatable:
-      op = op2inst.pop(name)
+    if self.op.iallocator:
+      op2inst = dict((op.instance_name, op) for op in self.op.instances)
+      (allocatable, failed) = self.ia_result
 
-      if len(nodes) > 1:
-        (op.pnode, op.snode) = nodes
-      else:
-        (op.pnode,) = nodes
+      for (name, nodes) in allocatable:
+        op = op2inst.pop(name)
 
-      jobs.append([op])
+        if len(nodes) > 1:
+          (op.pnode, op.snode) = nodes
+        else:
+          (op.pnode,) = nodes
 
-    missing = set(op2inst.keys()) - set(failed)
-    assert not missing, \
-      "Iallocator did return incomplete result: %s" % utils.CommaJoin(missing)
+        jobs.append([op])
+
+      missing = set(op2inst.keys()) - set(failed)
+      assert not missing, \
+        "Iallocator did return incomplete result: %s" % \
+        utils.CommaJoin(missing)
+    else:
+      jobs.extend([op] for op in self.op.instances)
 
     return ResultWithJobs(jobs, **self._ConstructPartialResult())
 
@@ -2302,8 +2336,8 @@ class LUInstanceSetParams(LogicalUnit):
   def CheckArguments(self):
     if not (self.op.nics or self.op.disks or self.op.disk_template or
             self.op.hvparams or self.op.beparams or self.op.os_name or
-            self.op.offline is not None or self.op.runtime_mem or
-            self.op.pnode):
+            self.op.osparams or self.op.offline is not None or
+            self.op.runtime_mem or self.op.pnode):
       raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
 
     if self.op.hvparams:
@@ -3102,24 +3136,13 @@ class LUInstanceSetParams(LogicalUnit):
                            [params], file_path, file_driver, idx,
                            self.Log, self.diskparams)[0]
 
-    info = GetInstanceInfoText(instance)
-
-    logging.info("Creating volume %s for instance %s",
-                 disk.iv_name, instance.name)
-    # Note: this needs to be kept in sync with _CreateDisks
-    #HARDCODE
-    for node in instance.all_nodes:
-      f_create = (node == instance.primary_node)
-      try:
-        CreateBlockDev(self, node, instance, disk, f_create, info, f_create)
-      except errors.OpExecError, err:
-        self.LogWarning("Failed to create volume %s (%s) on node '%s': %s",
-                        disk.iv_name, disk, node, err)
+    new_disks = CreateDisks(self, instance, disks=[disk])
 
     if self.cluster.prealloc_wipe_disks:
       # Wipe new disk
-      WipeDisks(self, instance,
-                disks=[(idx, disk, 0)])
+      WipeOrCleanupDisks(self, instance,
+                         disks=[(idx, disk, 0)],
+                         cleanup=new_disks)
 
     return (disk, [
       ("disk/%d" % idx, "add:size=%s,mode=%s" % (disk.size, disk.mode)),
@@ -3131,14 +3154,13 @@ class LUInstanceSetParams(LogicalUnit):
 
     """
     changes = []
-    mode = params.get(constants.IDISK_MODE, None)
-    if mode:
-      disk.mode = mode
+    if constants.IDISK_MODE in params:
+      disk.mode = params.get(constants.IDISK_MODE)
       changes.append(("disk.mode/%d" % idx, disk.mode))
 
-    name = params.get(constants.IDISK_NAME, None)
-    disk.name = name
-    changes.append(("disk.name/%d" % idx, disk.name))
+    if constants.IDISK_NAME in params:
+      disk.name = params.get(constants.IDISK_NAME)
+      changes.append(("disk.name/%d" % idx, disk.name))
 
     return changes
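
The final hunk fixes two related problems: a truthiness check on IDISK_MODE cannot distinguish an absent key from a falsy value, and the name handling unconditionally overwrote disk.name, setting it to None whenever no rename was requested. A toy example of the difference:

    params = {"name": ""}
    if params.get("name"):    # falsy value: a requested update is skipped
        print("truthy check fires")
    if "name" in params:      # key present: the update is applied
        print("membership check fires")
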