else:
if not set(disks).issubset(instance.disks):
raise errors.ProgrammerError("Can only act on disks belonging to the"
- " target instance")
+ " target instance: expected a subset of %r,"
+ " got %r" % (instance.disks, disks))
return disks
ignored.
"""
+ lu.cfg.MarkInstanceDisksInactive(instance.name)
all_result = True
disks = ExpandCheckDisks(instance, disks)
# into any other network-connected state (Connected, SyncTarget,
# SyncSource, etc.)
+ # mark instance disks as active before doing actual work, so watcher does
+ # not try to shut them down erroneously
+ lu.cfg.MarkInstanceDisksActive(iname)
+
# 1st pass, assemble on all nodes in secondary mode
for idx, inst_disk in enumerate(disks):
for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
for disk in disks:
lu.cfg.SetDiskID(disk, instance.primary_node)
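+ # if the assembly failed, revert the 'active' marker set above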
+ if not disks_ok:
+ lu.cfg.MarkInstanceDisksInactive(iname)
+
return disks_ok, device_info
if wipe_disks:
# Get disk size from primary node for wiping
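+ # make sure the disk's physical ID points at the primary node before the RPC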
+ self.cfg.SetDiskID(disk, instance.primary_node)
result = self.rpc.call_blockdev_getsize(instance.primary_node, [disk])
result.Raise("Failed to retrieve disk size from node '%s'" %
instance.primary_node)
if disk_abort:
self.LogWarning("Disk syncing has not returned a good status; check"
" the instance")
- if instance.admin_state != constants.ADMINST_UP:
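+ # only shut the disk down if the instance's disks are not supposed to be active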
+ if not instance.disks_active:
_SafeShutdownInstanceDisks(self, instance, disks=[disk])
- elif instance.admin_state != constants.ADMINST_UP:
+ elif not instance.disks_active:
self.LogWarning("Not shutting down the disk even if the instance is"
" not supposed to be running because no wait for"
" sync mode was requested")
if self.op.wait_for_sync:
if not WaitForSync(self, self.instance):
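+ # the operation aborts below, so clear the disks-active marker first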
+ self.cfg.MarkInstanceDisksInactive(self.instance.name)
raise errors.OpExecError("Some disks of the instance are degraded!")
return disks_info
feedback_fn("Current seconary node: %s" %
utils.CommaJoin(self.instance.secondary_nodes))
- activate_disks = (self.instance.admin_state != constants.ADMINST_UP)
+ activate_disks = not self.instance.disks_active
# Activate the instance disks if we're replacing them on a down instance
if activate_disks:
if msg or not result.payload:
if not msg:
msg = "disk not found"
- raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
- (idx, node, msg))
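+ # hint at activate-disks when the disks simply do not look activated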
+ if not self._CheckDisksActivated(self.instance):
+ extra_hint = ("\nDisks seem to be not properly activated. Try"
+ " running activate-disks on the instance before"
+ " using replace-disks.")
+ else:
+ extra_hint = ""
+ raise errors.OpExecError("Can't find disk/%d on node %s: %s%s" %
+ (idx, node, msg, extra_hint))
def _CheckDisksConsistency(self, node_name, on_primary, ldisk):
for idx, dev in enumerate(self.instance.disks):
# we pass force_create=True to force the LVM creation
for new_lv in new_lvs:
- _CreateBlockDevInner(self.lu, node_name, self.instance, new_lv, True,
- GetInstanceInfoText(self.instance), False,
- excl_stor)
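+ # _CreateBlockDevInner reports failures via DeviceCreationError; convert it
+ # into an OpExecError so the operation fails with a clear message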
+ try:
+ _CreateBlockDevInner(self.lu, node_name, self.instance, new_lv, True,
+ GetInstanceInfoText(self.instance), False,
+ excl_stor)
+ except errors.DeviceCreationError, e:
+ raise errors.OpExecError("Can't create block device: %s" % e.message)
return iv_names
(self.new_node, idx))
# we pass force_create=True to force LVM creation
for new_lv in dev.children:
- _CreateBlockDevInner(self.lu, self.new_node, self.instance, new_lv,
- True, GetInstanceInfoText(self.instance), False,
- excl_stor)
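+ # as above, turn a DeviceCreationError into an OpExecError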
+ try:
+ _CreateBlockDevInner(self.lu, self.new_node, self.instance, new_lv,
+ True, GetInstanceInfoText(self.instance), False,
+ excl_stor)
+ except errors.DeviceCreationError, e:
+ raise errors.OpExecError("Can't create block device: %s" % e.message)
# Step 4: drbd minors and drbd setup changes
# after this, we must manually remove the drbd minors on both the