_Fail("Hypervisor error: %s", err, exc=True)
-def InstanceShutdown(instance, timeout):
+def InstanceShutdown(instance, timeout, reason, store_reason=True):
"""Shut an instance down.
@note: this function uses polling with a hardcoded timeout.
@type instance: L{objects.Instance}
@param instance: the instance object
@type timeout: integer
@param timeout: maximum timeout for soft shutdown
+ @type reason: list of reasons
+ @param reason: the reason trail for this shutdown
+ @type store_reason: boolean
+ @param store_reason: whether to store the shutdown reason trail on file
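+ A trail entry pairs a source with a free-text reason (the tests in this
+ change check entry[0] and entry[1]); a third timestamp element is
+ assumed, e.g.
+ (constants.OPCODE_REASON_SRC_USER, "Not used anymore", <timestamp>).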
@rtype: None
"""
try:
hyper.StopInstance(instance, retry=self.tried_once)
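+ # The trail is written once the hypervisor has accepted the stop
+ # request; callers that record the trail themselves pass
+ # store_reason=False (the hard-reboot path below does this and stores
+ # the trail only after the instance is running again).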
+ if store_reason:
+ _StoreInstReasonTrail(instance.name, reason)
except errors.HypervisorError, err:
if iname not in hyper.ListInstances():
# if the instance is no longer existing, consider this a
# success and go to cleanup
_Fail("Failed to soft reboot instance %s: %s", instance.name, err)
elif reboot_type == constants.INSTANCE_REBOOT_HARD:
try:
- InstanceShutdown(instance, shutdown_timeout)
+ InstanceShutdown(instance, shutdown_timeout, reason, store_reason=False)
result = StartInstance(instance, False)
_StoreInstReasonTrail(instance.name, reason)
return result
else:
if instance_running:
result = self.rpc.call_instance_shutdown(node_current, instance,
- self.op.shutdown_timeout)
+ self.op.shutdown_timeout,
+ reason)
result.Raise("Could not shutdown instance for full reboot")
_ShutdownInstanceDisks(self, instance)
else:
instance = self.instance
node_current = instance.primary_node
timeout = self.op.timeout
+ reason = self.op.reason
# If the instance is offline we shouldn't mark it as down, as that
# resets the offline flag.
assert self.op.ignore_offline_nodes
self.LogInfo("Primary node offline, marked instance as stopped")
else:
- result = self.rpc.call_instance_shutdown(node_current, instance, timeout)
+ result = self.rpc.call_instance_shutdown(node_current, instance, timeout,
+ reason)
msg = result.fail_msg
if msg:
self.LogWarning("Could not shutdown instance: %s", msg)
instance.name, instance.primary_node)
result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
- self.op.shutdown_timeout)
+ self.op.shutdown_timeout,
+ self.op.reason)
msg = result.fail_msg
if msg:
if self.op.ignore_failures:
self.owned_locks(locking.LEVEL_NODE_RES))
result = self.rpc.call_instance_shutdown(source_node, instance,
- self.op.shutdown_timeout)
+ self.op.shutdown_timeout,
+ self.op.reason)
msg = result.fail_msg
if msg:
if self.op.ignore_consistency:
instance.name, source_node)
result = self.rpc.call_instance_shutdown(source_node, instance,
- self.shutdown_timeout)
+ self.shutdown_timeout,
+ self.lu.op.reason)
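+ # Note: reason is reached via self.lu here, presumably because this
+ # code runs in a tasklet rather than in the LU itself (the removed line
+ # likewise used self.shutdown_timeout without the op prefix).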
msg = result.fail_msg
if msg:
if self.ignore_consistency or primary_node.offline:
# shutdown the instance, but not the disks
feedback_fn("Shutting down instance %s" % instance.name)
result = self.rpc.call_instance_shutdown(src_node, instance,
- self.op.shutdown_timeout)
+ self.op.shutdown_timeout,
+ self.op.reason)
# TODO: Maybe ignore failures if ignore_remove_failures is set
result.Raise("Could not shutdown instance %s on"
" node %s" % (instance.name, src_node))
(GANETI_RAPI_VERSION, instance)), query, None)
def ShutdownInstance(self, instance, dry_run=False, no_remember=False,
- **kwargs):
+ reason=None, **kwargs):
"""Shuts down an instance.
@type instance: str
@param instance: the instance to shut down
@type dry_run: bool
@param dry_run: whether to perform a dry run
@type no_remember: bool
@param no_remember: if true, will not record the state change
+ @type reason: string
+ @param reason: the reason for the shutdown
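+ Usage sketch (illustrative values, assuming an already-constructed
+ client):
+   client.ShutdownInstance("inst1.example.com", reason="kernel upgrade")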
@rtype: string
@return: job id
"""
query = []
_AppendDryRunIf(query, dry_run)
_AppendIf(query, no_remember, ("no_remember", 1))
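+ # _AppendIf adds the parameter only when reason is non-empty, so a call
+ # without a reason keeps the pre-change query format (exercised by
+ # testShutdownInstanceDefaultReason in this change).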
+ _AppendIf(query, reason, ("reason", reason))
return self._SendRequest(HTTP_PUT,
("/%s/instances/%s/shutdown" %
("instance_shutdown", SINGLE, None, constants.RPC_TMO_NORMAL, [
("instance", ED_INST_DICT, "Instance object"),
("timeout", None, None),
+ ("reason", None, "The reason for the shutdown"),
], None, None, "Stops an instance"),
("instance_balloon_memory", SINGLE, None, constants.RPC_TMO_NORMAL, [
("instance", ED_INST_DICT, "Instance object"),
"""
instance = objects.Instance.FromDict(params[0])
timeout = params[1]
- return backend.InstanceShutdown(instance, timeout)
+ trail = params[2]
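+ # The node daemon extends the trail with its own entry before calling
+ # the backend, so the stored trail presumably records each layer the
+ # request passed through.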
+ _extendReasonTrail(trail, "shutdown")
+ return backend.InstanceShutdown(instance, timeout, trail)
@staticmethod
def perspective_instance_start(params):
def testShutdownInstance(self):
self.rapi.AddResponse("1487")
self.assertEqual(1487, self.client.ShutdownInstance("foo-instance",
+ dry_run=True,
+ reason="NoMore"))
+ self.assertHandler(rlib2.R_2_instances_name_shutdown)
+ self.assertItems(["foo-instance"])
+ self.assertDryRun()
+ self.assertQuery("reason", ["NoMore"])
+
+ def testShutdownInstanceDefaultReason(self):
+ self.rapi.AddResponse("1487")
+ self.assertEqual(1487, self.client.ShutdownInstance("foo-instance",
dry_run=True))
self.assertHandler(rlib2.R_2_instances_name_shutdown)
self.assertItems(["foo-instance"])
self.assertDryRun()
+ self.assertQuery("reason", None)
def testStartupInstance(self):
self.rapi.AddResponse("27149")
clfactory = _FakeClientFactory(_FakeClient)
handler = _CreateHandler(rlib2.R_2_instances_name_shutdown, ["inst26791"], {
"no_remember": ["0"],
+ "reason": ["Not used anymore"],
}, {}, clfactory)
job_id = handler.PUT()
cl = clfactory.GetNextClient()
(exp_job_id, (op, )) = cl.GetNextSubmittedJob()
self.assertEqual(job_id, exp_job_id)
self.assertEqual(op.instance_name, "inst26791")
self.assertFalse(op.no_remember)
self.assertFalse(op.dry_run)
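+ # Expected trail shape: entry 0 carries the user-supplied reason,
+ # entry 1 the entry appended by the RAPI handler (empty free-text
+ # part); index [0] is the source, [1] the reason text.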
+ self.assertEqual(op.reason[0][0], constants.OPCODE_REASON_SRC_USER)
+ self.assertEqual(op.reason[0][1], "Not used anymore")
+ self.assertEqual(op.reason[1][0],
+ "%s:%s" % (constants.OPCODE_REASON_SRC_RLIB2,
+ "instances_name_shutdown"))
+ self.assertEqual(op.reason[1][1], "")
self.assertRaises(IndexError, cl.GetNextSubmittedJob)