master_capable=opts.master_capable,
disk_state=disk_state,
hv_state=hv_state,
- node_setup=modify_ssh_setup)
+ node_setup=modify_ssh_setup,
+ verbose=opts.verbose,
+ debug=opts.debug > 0)
SubmitOpCode(op, opts=opts)
@return: the desired exit code
"""
- op = opcodes.OpNodeRemove(node_name=args[0])
+ op = opcodes.OpNodeRemove(node_name=args[0],
+ debug=opts.debug > 0,
+ verbose=opts.verbose)
SubmitOpCode(op, opts=opts)
return 0
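Note: `opts.debug` is an integer (the `--debug` option counts repetitions), which is why it is coerced with `opts.debug > 0` before landing on the opcode, while `opts.verbose` is already a boolean. A minimal sketch of that coercion, with a hypothetical option setup that only mirrors how the CLI options behave:

    import optparse

    parser = optparse.OptionParser()
    # Hypothetical mirror of the CLI options: --debug counts, --verbose flags.
    parser.add_option("-d", "--debug", action="count", default=0)
    parser.add_option("--verbose", action="store_true", default=False)

    (opts, _) = parser.parse_args(["-d", "-d", "--verbose"])
    assert opts.debug > 0          # count coerced to a boolean for the opcode
    assert opts.verbose is True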
auto_promote=opts.auto_promote,
powered=opts.node_powered,
hv_state=hv_state,
- disk_state=disk_state)
+ disk_state=disk_state,
+ verbose=opts.verbose,
+ debug=opts.debug > 0)
# even though the result is processed here, --submit is still honored
result = SubmitOrSend(op, opts)
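Note: `SubmitOrSend` honours `--submit` (queue the job and return immediately), whereas the plain `SubmitOpCode` used above always waits for the result; that is what the comment is pointing at. A hedged sketch of the contract, with illustrative names rather than the exact cli.py internals:

    def submit_or_send(op, opts, client):
      # Sketch only: with --submit the job is queued and its ID returned,
      # without it we block until the job finishes and return its result.
      job_id = client.SubmitJob([op])
      if opts.submit_only:
        return job_id
      return wait_for_job_result(client, job_id)  # hypothetical helper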
CAPAB_MASTER_OPT, CAPAB_VM_OPT, NODE_PARAMS_OPT, HV_STATE_OPT,
DISK_STATE_OPT],
"[-s ip] [--readd] [--no-ssh-key-check] [--force-join]"
- " [--no-node-setup] [--verbose] [--network] <node_name>",
+ " [--no-node-setup] [--verbose] [--network] [--debug] <node_name>",
"Add a node to the cluster"),
"evacuate": (
EvacuateNode, ARGS_ONE_NODE,
[MC_OPT, DRAINED_OPT, OFFLINE_OPT,
CAPAB_MASTER_OPT, CAPAB_VM_OPT, SECONDARY_IP_OPT,
AUTO_PROMOTE_OPT, DRY_RUN_OPT, PRIORITY_OPT, NODE_PARAMS_OPT,
- NODE_POWERED_OPT, HV_STATE_OPT, DISK_STATE_OPT],
+ NODE_POWERED_OPT, HV_STATE_OPT, DISK_STATE_OPT, VERBOSE_OPT],
"<node_name>", "Alters the parameters of a node"),
"powercycle": (
PowercycleNode, ARGS_ONE_NODE,
"on|off|cycle|status [nodes...]",
"Change power state of node by calling out-of-band helper."),
"remove": (
- RemoveNode, ARGS_ONE_NODE, [DRY_RUN_OPT, PRIORITY_OPT],
- "<node_name>", "Removes a node from the cluster"),
+ RemoveNode, ARGS_ONE_NODE, [DRY_RUN_OPT, PRIORITY_OPT, VERBOSE_OPT],
+ "[--verbose] [--debug] <node_name>", "Removes a node from the cluster"),
"volumes": (
ListVolumes, [ArgNode()],
[NOHDR_OPT, SEP_OPT, USEUNITS_OPT, FIELDS_OPT, PRIORITY_OPT],
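Note: with these entries in place the new flags show up directly on the command line; illustratively, the updated "remove" command now builds its opcode as follows (hostname hypothetical):

    # What "gnt-node remove --verbose --debug node1.example.com" produces:
    op = opcodes.OpNodeRemove(node_name="node1.example.com",
                              debug=True,     # --debug given at least once
                              verbose=True)   # --verbose given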
potential_master_candidates,
True, # add node's key to all node's 'authorized_keys'
True, # all nodes are potential master candidates
- False) # do not update the node's public keys
+ False, # do not update the node's public keys
+ lu.op.debug, # log the ssh calls at 'debug' level
+ lu.op.verbose) # log the ssh calls at 'verbose' level
ssh_result[master_node].Raise(
"Could not update the SSH setup of node '%s' after promotion"
" (UUID: %s)." % (node.name, node.uuid))
True, # from public keys
False, # clear authorized keys
True, # clear public keys
- True) # it's a readd
+ True, # it's a readd
+ self.op.debug, # log the ssh calls at 'debug' level
+ self.op.verbose) # log the ssh calls at 'verbose' level
remove_result[master_node].Raise(
"Could not remove SSH keys of node %s before readding,"
" (UUID: %s)." % (new_node_name, new_node_uuid))
[master_node], new_node_uuid, new_node_name,
potential_master_candidates,
is_master_candidate, is_potential_master_candidate,
- is_potential_master_candidate)
+ is_potential_master_candidate, self.op.debug, self.op.verbose)
result[master_node].Raise("Could not update the node's SSH setup.")
WarnAboutFailedSshUpdates(result, master_node, feedback_fn)
False, # currently, all nodes are potential master candidates
False, # do not clear node's 'authorized_keys'
False, # do not clear node's 'ganeti_pub_keys'
- False) # no readd
+ False, # no readd
+ self.op.debug, # log the ssh calls at 'debug' level
+ self.op.verbose) # log the ssh calls at 'verbose' level
ssh_result[master_node].Raise(
"Could not adjust the SSH setup after demoting node '%s'"
" (UUID: %s)." % (node.name, node.uuid))
potential_master_candidate, # from_public_keys
True, # clear node's 'authorized_keys'
True, # clear node's 'ganeti_public_keys'
- False) # no readd
+ False, # no readd
+ self.op.debug, # log the ssh calls at 'debug' level
+ self.op.verbose) # log the ssh calls at 'verbose' level
result[master_node].Raise(
"Could not remove the SSH key of node '%s' (UUID: %s)." %
(self.op.node_name, self.node.uuid))
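Note: all of these call sites follow the same pattern: a per-node RPC whose boolean arguments are positional (hence the per-argument comments), followed by `.Raise()` on the master node's entry to turn a failure into an error. A hedged sketch of the shape, with the argument order taken from the unpacking in the noded handler below:

    # Sketch (names illustrative): the generated call_node_ssh_key_remove
    # returns a map of per-node results; Raise() aborts on failure.
    result = self.rpc.call_node_ssh_key_remove(
      [master_node],                 # only the master runs the update
      node_uuid, node_name,
      master_candidate_uuids, potential_master_candidates,
      from_authorized_keys, from_public_keys,
      clear_authorized_keys, clear_public_keys,
      readd,
      self.op.debug,                 # log the ssh calls at 'debug' level
      self.op.verbose)               # log the ssh calls at 'verbose' level
    result[master_node].Raise("Could not remove the node's SSH key.")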
("to_public_keys", None, "Whether the node's key should be added"
" to all nodes' public key file"),
("get_public_keys", None, "Whether the node should get the other nodes'"
- " public keys")],
+ " public keys"),
+ ("debug", None, "Set loglevel of ssh calls to 'debug'."),
+ ("verbose", None, "Set loglevel of ssh calls to 'verbose'.")],
None, None, "Distribute a new node's public SSH key on the cluster."),
("node_ssh_key_remove", MULTI, None, constants.RPC_TMO_URGENT, [
("node_uuid", None, "UUID of the node whose key is removed"),
("clear_public_keys", None,
"If the 'ganeti_pub_keys' file of the node should be cleared."),
("readd", None,
- "Whether this is a readd operation.")],
+ "Whether this is a readd operation."),
+ ("debug", None, "Set loglevel of ssh calls to 'debug'."),
+ ("verbose", None, "Set loglevel of ssh calls to 'verbose'.")],
None, None, "Remove a node's SSH key from the other nodes' key files."),
("node_ssh_keys_renew", MULTI, None, constants.RPC_TMO_4HRS, [
("node_uuids", None, "UUIDs of the nodes whose key is renewed"),
"""
(node_uuid, node_name, potential_master_candidates,
- to_authorized_keys, to_public_keys, get_public_keys) = params
+ to_authorized_keys, to_public_keys, get_public_keys,
+ debug, verbose) = params
return backend.AddNodeSshKey(node_uuid, node_name,
potential_master_candidates,
to_authorized_keys=to_authorized_keys,
to_public_keys=to_public_keys,
- get_public_keys=get_public_keys)
+ get_public_keys=get_public_keys,
+ ssh_update_debug=debug,
+ ssh_update_verbose=verbose)
@staticmethod
def perspective_node_ssh_keys_renew(params):
(node_uuid, node_name,
master_candidate_uuids, potential_master_candidates,
from_authorized_keys, from_public_keys, clear_authorized_keys,
- clear_public_keys, readd) = params
+ clear_public_keys, readd, debug, verbose) = params
return backend.RemoveNodeSshKey(node_uuid, node_name,
master_candidate_uuids,
potential_master_candidates,
from_public_keys=from_public_keys,
clear_authorized_keys=clear_authorized_keys,
clear_public_keys=clear_public_keys,
- readd=readd)
+ readd=readd,
+ ssh_update_debug=debug,
+ ssh_update_verbose=verbose)
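Note: the unpack order in these `perspective_*` handlers must match the rpc_defs argument order exactly, otherwise booleans silently swap places. A quick sanity sketch (hypothetical test, not part of the patch):

    params = ["uuid-1", "node1", ["uuid-1"], ["node1"],
              True, True,                  # from_authorized/public_keys
              False, False,                # clear_authorized/public_keys
              False,                       # readd
              True, False]                 # debug, verbose
    (node_uuid, node_name,
     master_candidate_uuids, potential_master_candidates,
     from_authorized_keys, from_public_keys, clear_authorized_keys,
     clear_public_keys, readd, debug, verbose) = params
    assert (debug, verbose) == (True, False)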
# cluster --------------------------
# Identifying the node - RAPI provides these itself
IDENTIFIERS = ["node_name", "node_uuid"]
# As the name states, these can be set but not retrieved yet
- NOT_EXPOSED_YET = ["hv_state", "disk_state", "auto_promote"]
+ NOT_EXPOSED_YET = ["hv_state", "disk_state", "auto_promote",
+ "debug", "verbose"]
_DoGetPutTests("/2/nodes/%s" % node.primary,
"/2/nodes/%s/modify" % node.primary,
, OpCodes.opSecondaryIp = Nothing
, OpCodes.opgenericNdParams = Nothing
, OpCodes.opPowered = Nothing
+ , OpCodes.opVerbose = False
+ , OpCodes.opDebug = False
}
-- | Generate OpCode for applying an OobCommand to the given nodes
, opSecondaryIp = Nothing
, opgenericNdParams = Nothing
, opPowered = Nothing
+ , opVerbose = False
+ , opDebug = False
} ]
-- | Submit and register the next job for a node evacuation.
OpDoc.opNodeRemove,
[ pNodeName
, pNodeUuid
+ , pVerbose
+ , pDebug
],
"node_name")
, ("OpNodeAdd",
, pVmCapable
, pNdParams
, pNodeSetup
+ , pVerbose
+ , pDebug
],
"node_name")
, ("OpNodeQueryvols",
, pSecondaryIp
, pNdParams
, pPowered
+ , pVerbose
+ , pDebug
],
"node_name")
, ("OpNodePowercycle",
arbitrary <*> arbitrary <*> arbitrary <*>
(arbitrary `suchThat` (>0))
"OP_NODE_REMOVE" ->
- OpCodes.OpNodeRemove <$> genNodeNameNE <*> return Nothing
+ OpCodes.OpNodeRemove <$> genNodeNameNE <*> return Nothing <*>
+ arbitrary <*> arbitrary
"OP_NODE_ADD" ->
OpCodes.OpNodeAdd <$> genNodeNameNE <*> emptyMUD <*> emptyMUD <*>
genMaybe genNameNE <*> genMaybe genNameNE <*> arbitrary <*>
genMaybe genNameNE <*> arbitrary <*> arbitrary <*> emptyMUD <*>
- arbitrary
+ arbitrary <*> arbitrary <*> arbitrary
"OP_NODE_QUERYVOLS" ->
OpCodes.OpNodeQueryvols <$> genNamesNE <*> genNodeNamesNE
"OP_NODE_QUERY_STORAGE" ->
OpCodes.OpNodeSetParams <$> genNodeNameNE <*> return Nothing <*>
arbitrary <*> emptyMUD <*> emptyMUD <*> arbitrary <*> arbitrary <*>
arbitrary <*> arbitrary <*> arbitrary <*> arbitrary <*>
- genMaybe genNameNE <*> emptyMUD <*> arbitrary
+ genMaybe genNameNE <*> emptyMUD <*> arbitrary <*> arbitrary <*>
+ arbitrary
"OP_NODE_POWERCYCLE" ->
OpCodes.OpNodePowercycle <$> genNodeNameNE <*> return Nothing <*>
arbitrary
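Note: these `Arbitrary` generators feed the Python/Haskell opcode round-trip tests, so every field added on the Python side needs a matching `<*> arbitrary` here, and the `opVerbose`/`opDebug` defaults of `False` above mirror the Python defaults. Hedged illustration of the serialized form the round trip must reproduce (exact serialization helpers may differ):

    op = opcodes.OpNodeRemove(node_name="node1.example.com",
                              verbose=False, debug=False)
    # Expected to contain, among the other fields:
    #   {'OP_ID': 'OP_NODE_REMOVE', 'node_name': 'node1.example.com',
    #    'verbose': False, 'debug': False}
    print(op.__getstate__())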