QA: Ensure the DRBD secret is not retrievable via RAPI
[ganeti-github.git] lib/cmdlib/instance_query.py
#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units for querying instances."""

import itertools
import logging
import operator

from ganeti import compat
from ganeti import constants
from ganeti import locking
from ganeti import qlang
from ganeti import query
from ganeti.cmdlib.base import QueryBase, NoHooksLU
from ganeti.cmdlib.common import ShareAll, GetWantedInstances, \
  CheckInstanceNodeGroups, CheckInstancesNodeGroups, AnnotateDiskParams
from ganeti.cmdlib.instance_operation import GetInstanceConsole
from ganeti.cmdlib.instance_utils import NICListToTuple

import ganeti.masterd.instance


class InstanceQuery(QueryBase):
  FIELDS = query.INSTANCE_FIELDS

  def ExpandNames(self, lu):
    lu.needed_locks = {}
    lu.share_locks = ShareAll()

    if self.names:
      (_, self.wanted) = GetWantedInstances(lu, self.names)
    else:
      self.wanted = locking.ALL_SET

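    # Live (runtime) data can only be gathered via RPC to the nodes, so
    # locks are only acquired when the caller requested locking and
    # IQ_LIVE is among the requested data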
    self.do_locking = (self.use_locking and
                       query.IQ_LIVE in self.requested_data)
    if self.do_locking:
      lu.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
      lu.needed_locks[locking.LEVEL_NODEGROUP] = []
      lu.needed_locks[locking.LEVEL_NODE] = []
      lu.needed_locks[locking.LEVEL_NETWORK] = []
      lu.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

    self.do_grouplocks = (self.do_locking and
                          query.IQ_NODES in self.requested_data)

  def DeclareLocks(self, lu, level):
    if self.do_locking:
      if level == locking.LEVEL_NODEGROUP and self.do_grouplocks:
        assert not lu.needed_locks[locking.LEVEL_NODEGROUP]

        # Lock all groups used by instances optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        lu.needed_locks[locking.LEVEL_NODEGROUP] = \
          set(group_uuid
              for instance_name in lu.owned_locks(locking.LEVEL_INSTANCE)
              for group_uuid in
                lu.cfg.GetInstanceNodeGroups(
                  lu.cfg.GetInstanceInfoByName(instance_name).uuid))
      elif level == locking.LEVEL_NODE:
        lu._LockInstancesNodes()  # pylint: disable=W0212

      elif level == locking.LEVEL_NETWORK:
        lu.needed_locks[locking.LEVEL_NETWORK] = \
          frozenset(net_uuid
                    for instance_name in lu.owned_locks(locking.LEVEL_INSTANCE)
                    for net_uuid in
                      lu.cfg.GetInstanceNetworks(
                        lu.cfg.GetInstanceInfoByName(instance_name).uuid))

  @staticmethod
  def _CheckGroupLocks(lu):
    owned_instance_names = frozenset(lu.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(lu.owned_locks(locking.LEVEL_NODEGROUP))

    # Check if node groups for locked instances are still correct
    for instance_name in owned_instance_names:
      instance = lu.cfg.GetInstanceInfoByName(instance_name)
      CheckInstanceNodeGroups(lu.cfg, instance.uuid, owned_groups)

  def _GetQueryData(self, lu):
    """Computes the list of instances and their attributes.

    """
    if self.do_grouplocks:
      self._CheckGroupLocks(lu)

    cluster = lu.cfg.GetClusterInfo()
    insts_by_name = dict((inst.name, inst) for
                         inst in lu.cfg.GetAllInstancesInfo().values())

    instance_names = self._GetNames(lu, insts_by_name.keys(),
                                    locking.LEVEL_INSTANCE)

    instance_list = [insts_by_name[name] for name in instance_names]
    node_uuids = frozenset(itertools.chain(*(inst.all_nodes
                                             for inst in instance_list)))
    hv_list = list(set([inst.hypervisor for inst in instance_list]))
    bad_node_uuids = []
    offline_node_uuids = []
    wrongnode_inst_uuids = set()

    # Gather data as requested
    if self.requested_data & set([query.IQ_LIVE, query.IQ_CONSOLE]):
      live_data = {}
      node_data = lu.rpc.call_all_instances_info(node_uuids, hv_list,
                                                 cluster.hvparams)
      for node_uuid in node_uuids:
        result = node_data[node_uuid]
        if result.offline:
          # offline nodes will be in both lists
          assert result.fail_msg
          offline_node_uuids.append(node_uuid)
        if result.fail_msg:
          bad_node_uuids.append(node_uuid)
        elif result.payload:
          for inst_name in result.payload:
            if inst_name in insts_by_name:
              instance = insts_by_name[inst_name]
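              # Only the configured primary node's report is trusted;
              # instances found running on another node are flagged as
              # being on the wrong node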
              if instance.primary_node == node_uuid:
                for iname in result.payload:
                  live_data[insts_by_name[iname].uuid] = result.payload[iname]
              else:
                wrongnode_inst_uuids.add(instance.uuid)
            else:
              # orphan instance; we don't list it here as we don't
              # handle this case yet in the output of instance listing
              logging.warning("Orphan instance '%s' found on node %s",
                              inst_name, lu.cfg.GetNodeName(node_uuid))
        # else no instance is alive
    else:
      live_data = {}

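    # Disk usage is derived from the configuration (disk template and
    # disk sizes) only; no RPC calls are needed for it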
    if query.IQ_DISKUSAGE in self.requested_data:
      gmi = ganeti.masterd.instance
      disk_usage = dict((inst.uuid,
                         gmi.ComputeDiskSize(inst.disk_template,
                                             [{constants.IDISK_SIZE: disk.size}
                                              for disk in inst.disks]))
                        for inst in instance_list)
    else:
      disk_usage = None

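    # Console information can only be computed for instances that are
    # actually running (i.e. appear in the live data)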
    if query.IQ_CONSOLE in self.requested_data:
      consinfo = {}
      for inst in instance_list:
        if inst.uuid in live_data:
          # Instance is running
          consinfo[inst.uuid] = \
            GetInstanceConsole(cluster, inst,
                               lu.cfg.GetNodeInfo(inst.primary_node))
        else:
          consinfo[inst.uuid] = None
    else:
      consinfo = None

    if query.IQ_NODES in self.requested_data:
      nodes = dict(lu.cfg.GetMultiNodeInfo(node_uuids))
      groups = dict((uuid, lu.cfg.GetNodeGroup(uuid))
                    for uuid in set(map(operator.attrgetter("group"),
                                        nodes.values())))
    else:
      nodes = None
      groups = None

    if query.IQ_NETWORKS in self.requested_data:
      net_uuids = itertools.chain(*(lu.cfg.GetInstanceNetworks(i.uuid)
                                    for i in instance_list))
      networks = dict((uuid, lu.cfg.GetNetwork(uuid)) for uuid in net_uuids)
    else:
      networks = None

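    # Hand all gathered data to the generic query machinery, which then
    # extracts and formats the requested fields from this container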
    return query.InstanceQueryData(instance_list, lu.cfg.GetClusterInfo(),
                                   disk_usage, offline_node_uuids,
                                   bad_node_uuids, live_data,
                                   wrongnode_inst_uuids, consinfo, nodes,
                                   groups, networks)


class LUInstanceQuery(NoHooksLU):
  """Logical unit for querying instances.

  """
  # pylint: disable=W0142
  REQ_BGL = False

  def CheckArguments(self):
    self.iq = InstanceQuery(qlang.MakeSimpleFilter("name", self.op.names),
                            self.op.output_fields, self.op.use_locking)

  def ExpandNames(self):
    self.iq.ExpandNames(self)

  def DeclareLocks(self, level):
    self.iq.DeclareLocks(self, level)

  def Exec(self, feedback_fn):
    return self.iq.OldStyleQuery(self)


class LUInstanceQueryData(NoHooksLU):
  """Query runtime instance data.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}

    # Use locking if requested or when non-static information is wanted
    if not (self.op.static or self.op.use_locking):
      self.LogWarning("Non-static data requested, locks need to be acquired")
      self.op.use_locking = True

    if self.op.instances or not self.op.use_locking:
      # Expand instance names right here
      (_, self.wanted_names) = GetWantedInstances(self, self.op.instances)
    else:
      # Will use acquired locks
      self.wanted_names = None

    if self.op.use_locking:
      self.share_locks = ShareAll()

      if self.wanted_names is None:
        self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
      else:
        self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names

      self.needed_locks[locking.LEVEL_NODEGROUP] = []
      self.needed_locks[locking.LEVEL_NODE] = []
      self.needed_locks[locking.LEVEL_NETWORK] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if self.op.use_locking:
      owned_instances = dict(self.cfg.GetMultiInstanceInfoByName(
                               self.owned_locks(locking.LEVEL_INSTANCE)))
      if level == locking.LEVEL_NODEGROUP:

        # Lock all groups used by instances optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        self.needed_locks[locking.LEVEL_NODEGROUP] = \
          frozenset(group_uuid
                    for instance_uuid in owned_instances.keys()
                    for group_uuid in
                      self.cfg.GetInstanceNodeGroups(instance_uuid))

      elif level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

      elif level == locking.LEVEL_NETWORK:
        self.needed_locks[locking.LEVEL_NETWORK] = \
          frozenset(net_uuid
                    for instance_uuid in owned_instances.keys()
                    for net_uuid in
                      self.cfg.GetInstanceNetworks(instance_uuid))

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_node_uuids = frozenset(self.owned_locks(locking.LEVEL_NODE))
    owned_networks = frozenset(self.owned_locks(locking.LEVEL_NETWORK))

    if self.wanted_names is None:
      assert self.op.use_locking, "Locking was not used"
      self.wanted_names = owned_instances

    instances = dict(self.cfg.GetMultiInstanceInfoByName(self.wanted_names))

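    # When locking was used, verify that the optimistically acquired node
    # group locks still match the current configuration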
    if self.op.use_locking:
      CheckInstancesNodeGroups(self.cfg, instances, owned_groups,
                               owned_node_uuids, None)
    else:
      assert not (owned_instances or owned_groups or
                  owned_node_uuids or owned_networks)

    self.wanted_instances = instances.values()

  def _ComputeBlockdevStatus(self, node_uuid, instance, dev):
    """Returns the status of a block device

    """
    if self.op.static or not node_uuid:
      return None

    self.cfg.SetDiskID(dev, node_uuid)

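    # Ask the node for the current state of the device; offline nodes
    # simply yield no status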
    result = self.rpc.call_blockdev_find(node_uuid, dev)
    if result.offline:
      return None

    result.Raise("Can't compute disk status for %s" % instance.name)

    status = result.payload
    if status is None:
      return None

    return (status.dev_path, status.major, status.minor,
            status.sync_percent, status.estimated_time,
            status.is_degraded, status.ldisk_status)

  def _ComputeDiskStatus(self, instance, node_uuid2name_fn, dev):
    """Compute block device status.

    """
    (anno_dev,) = AnnotateDiskParams(instance, [dev], self.cfg)

    return self._ComputeDiskStatusInner(instance, None, node_uuid2name_fn,
                                        anno_dev)

  def _ComputeDiskStatusInner(self, instance, snode_uuid, node_uuid2name_fn,
                              dev):
    """Compute block device status.

    @attention: The device has to be annotated already.

    """
    drbd_info = None
    output_logical_id = dev.logical_id
    output_physical_id = dev.physical_id
    if dev.dev_type in constants.DTS_DRBD:
      # we change the snode then (otherwise we use the one passed in)
      if dev.logical_id[0] == instance.primary_node:
        snode_uuid = dev.logical_id[1]
      else:
        snode_uuid = dev.logical_id[0]
      drbd_info = {
        "primary_node": node_uuid2name_fn(instance.primary_node),
        "primary_minor": dev.logical_id[3],
        "secondary_node": node_uuid2name_fn(snode_uuid),
        "secondary_minor": dev.logical_id[4],
        "port": dev.logical_id[2],
      }
      # replace the secret present at the end of the ids with None
      output_logical_id = dev.logical_id[:-1] + (None,)
      output_physical_id = dev.physical_id[:-1] + (None,)
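      # (this keeps the shared DRBD secret out of the query output, so it
      # cannot be retrieved e.g. via RAPI)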

    dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
                                              instance, dev)
    dev_sstatus = self._ComputeBlockdevStatus(snode_uuid, instance, dev)

    if dev.children:
      dev_children = map(compat.partial(self._ComputeDiskStatusInner,
                                        instance, snode_uuid,
                                        node_uuid2name_fn),
                         dev.children)
    else:
      dev_children = []

    return {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": output_logical_id,
      "drbd_info": drbd_info,
      "physical_id": output_physical_id,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      "mode": dev.mode,
      "size": dev.size,
      "spindles": dev.spindles,
      "name": dev.name,
      "uuid": dev.uuid,
      }

  def Exec(self, feedback_fn):
    """Gather and return data"""
    result = {}

    cluster = self.cfg.GetClusterInfo()

    node_uuids = itertools.chain(*(i.all_nodes for i in self.wanted_instances))
    nodes = dict(self.cfg.GetMultiNodeInfo(node_uuids))

    groups = dict(self.cfg.GetMultiNodeGroupInfo(node.group
                                                 for node in nodes.values()))

    for instance in self.wanted_instances:
      pnode = nodes[instance.primary_node]

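      # Runtime state: static queries and offline primary nodes return no
      # remote state; otherwise the primary node is queried directly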
      if self.op.static or pnode.offline:
        remote_state = None
        if pnode.offline:
          self.LogWarning("Primary node %s is marked offline, returning static"
                          " information only for instance %s" %
                          (pnode.name, instance.name))
      else:
        remote_info = self.rpc.call_instance_info(
            instance.primary_node, instance.name, instance.hypervisor,
            cluster.hvparams[instance.hypervisor])
        remote_info.Raise("Error checking node %s" % pnode.name)
        remote_info = remote_info.payload
        if remote_info and "state" in remote_info:
          remote_state = "up"
        else:
          if instance.admin_state == constants.ADMINST_UP:
            remote_state = "down"
          else:
            remote_state = instance.admin_state

      group2name_fn = lambda uuid: groups[uuid].name
      node_uuid2name_fn = lambda uuid: nodes[uuid].name

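      # Per-disk status; DRBD secrets have already been stripped from the
      # logical/physical ids by _ComputeDiskStatus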
      disks = map(compat.partial(self._ComputeDiskStatus, instance,
                                 node_uuid2name_fn),
                  instance.disks)

      snodes_group_uuids = [nodes[snode_uuid].group
                            for snode_uuid in instance.secondary_nodes]

      result[instance.name] = {
        "name": instance.name,
        "config_state": instance.admin_state,
        "run_state": remote_state,
        "pnode": pnode.name,
        "pnode_group_uuid": pnode.group,
        "pnode_group_name": group2name_fn(pnode.group),
        "snodes": map(node_uuid2name_fn, instance.secondary_nodes),
        "snodes_group_uuids": snodes_group_uuids,
        "snodes_group_names": map(group2name_fn, snodes_group_uuids),
        "os": instance.os,
        # this happens to be the same format used for hooks
        "nics": NICListToTuple(self, instance.nics),
        "disk_template": instance.disk_template,
        "disks": disks,
        "hypervisor": instance.hypervisor,
        "network_port": instance.network_port,
        "hv_instance": instance.hvparams,
        "hv_actual": cluster.FillHV(instance, skip_globals=True),
        "be_instance": instance.beparams,
        "be_actual": cluster.FillBE(instance),
        "os_instance": instance.osparams,
        "os_actual": cluster.SimpleFillOS(instance.os, instance.osparams),
        "serial_no": instance.serial_no,
        "mtime": instance.mtime,
        "ctime": instance.ctime,
        "uuid": instance.uuid,
        }

    return result