Merge branch 'stable-2.11' into stable-2.12
[ganeti-github.git] / lib / backend.py
1 #
2 #
3
4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc.
5 # All rights reserved.
6 #
7 # Redistribution and use in source and binary forms, with or without
8 # modification, are permitted provided that the following conditions are
9 # met:
10 #
11 # 1. Redistributions of source code must retain the above copyright notice,
12 # this list of conditions and the following disclaimer.
13 #
14 # 2. Redistributions in binary form must reproduce the above copyright
15 # notice, this list of conditions and the following disclaimer in the
16 # documentation and/or other materials provided with the distribution.
17 #
18 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
19 # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
20 # TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
21 # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
22 # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
23 # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
24 # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
25 # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
26 # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
27 # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28 # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
30
31 """Functions used by the node daemon
32
33 @var _ALLOWED_UPLOAD_FILES: denotes which files are accepted in
34 the L{UploadFile} function
35 @var _ALLOWED_CLEAN_DIRS: denotes which directories are accepted
36 in the L{_CleanDirectory} function
37
38 """
39
40 # pylint: disable=E1103,C0302
41
42 # E1103: %s %r has no %r member (but some types could not be
43 # inferred), because the _TryOSFromDisk returns either (True, os_obj)
44 # or (False, "string") which confuses pylint
45
46 # C0302: This module has become too big and should be split up
47
48
49 import base64
50 import errno
51 import logging
52 import os
53 import os.path
54 import pycurl
55 import random
56 import re
57 import shutil
58 import signal
59 import socket
60 import stat
61 import tempfile
62 import time
63 import zlib
64
65 from ganeti import errors
66 from ganeti import http
67 from ganeti import utils
68 from ganeti import ssh
69 from ganeti import hypervisor
70 from ganeti.hypervisor import hv_base
71 from ganeti import constants
72 from ganeti.storage import bdev
73 from ganeti.storage import drbd
74 from ganeti.storage import filestorage
75 from ganeti import objects
76 from ganeti import ssconf
77 from ganeti import serializer
78 from ganeti import netutils
79 from ganeti import runtime
80 from ganeti import compat
81 from ganeti import pathutils
82 from ganeti import vcluster
83 from ganeti import ht
84 from ganeti.storage.base import BlockDev
85 from ganeti.storage.drbd import DRBD8
86 from ganeti import hooksmaster
87 from ganeti.rpc import transport
88 from ganeti.rpc.errors import NoMasterError, TimeoutError
89
90
#: Kernel file holding the random per-boot identifier
_BOOT_ID_PATH = "/proc/sys/kernel/random/boot_id"
#: Directories that L{_CleanDirectory} may be asked to clean
_ALLOWED_CLEAN_DIRS = compat.UniqueFrozenset([
  pathutils.DATA_DIR,
  pathutils.JOB_QUEUE_ARCHIVE_DIR,
  pathutils.QUEUE_DIR,
  pathutils.CRYPTO_KEYS_DIR,
  ])
#: Maximum accepted SSL certificate validity, in seconds (one week)
_MAX_SSL_CERT_VALIDITY = 7 * 24 * 60 * 60
#: Base file names for X509 key/certificate pairs
_X509_KEY_FILE = "key"
_X509_CERT_FILE = "cert"
#: File names used in import/export ("IES") status directories
# (presumably read/written by the import-export daemon; verify callers)
_IES_STATUS_FILE = "status"
_IES_PID_FILE = "pid"
_IES_CA_FILE = "ca"

#: Valid LVS output line regex
_LVSLINE_REGEX = re.compile(r"^ *([^|]+)\|([^|]+)\|([0-9.]+)\|([^|]{6,})\|?$")

# Actions for the master setup script
_MASTER_START = "start"
_MASTER_STOP = "stop"

#: Maximum file permissions for restricted command directory and executables
_RCMD_MAX_MODE = (stat.S_IRWXU |
                  stat.S_IRGRP | stat.S_IXGRP |
                  stat.S_IROTH | stat.S_IXOTH)

#: Delay before returning an error for restricted commands
_RCMD_INVALID_DELAY = 10

#: How long to wait to acquire lock for restricted commands (shorter than
#: L{_RCMD_INVALID_DELAY}) to reduce blockage of noded forks when many
#: command requests arrive
_RCMD_LOCK_TIMEOUT = _RCMD_INVALID_DELAY * 0.8
124
125
class RPCFail(Exception):
  """Exception signalling a remote procedure call failure.

  The single argument of instances of this class is the error message.

  """
132
133
def _GetInstReasonFilename(instance_name):
  """Compute the path of an instance's state-change reason file.

  @type instance_name: string
  @param instance_name: The name of the instance
  @rtype: string
  @return: full path of the reason trail file for that instance

  """
  return utils.PathJoin(pathutils.INSTANCE_REASON_DIR, instance_name)
144
145
def _StoreInstReasonTrail(instance_name, trail):
  """Write an instance's state-change reason trail to its file.

  The file location depends on the instance name and on the cluster
  configuration defined at deploy time.

  @type instance_name: string
  @param instance_name: The name of the instance
  @type trail: list of reasons
  @param trail: reason trail to serialize

  @rtype: None

  """
  utils.WriteFile(_GetInstReasonFilename(instance_name),
                  data=serializer.DumpJson(trail))
164
165
def _Fail(msg, *args, **kwargs):
  """Log an error and then raise an RPCFail exception.

  The exception is handled specially in the ganeti daemon and turned
  into a 'failed' return type, so this is a useful shortcut for logging
  an error and returning it to the master daemon.

  @type msg: string
  @param msg: the text of the exception
  @raise RPCFail

  """
  if args:
    msg = msg % args
  # log unless the caller explicitly passed log=False
  if kwargs.get("log", True):
    if kwargs.get("exc"):
      # include the current traceback in the log record
      logging.exception(msg)
    else:
      logging.error(msg)
  raise RPCFail(msg)
187
188
def _GetConfig():
  """Return a freshly created SimpleStore wrapper.

  @rtype: L{ssconf.SimpleStore}
  @return: a SimpleStore instance

  """
  return ssconf.SimpleStore()
197
198
def _GetSshRunner(cluster_name):
  """Return an SshRunner configured for this cluster.

  @type cluster_name: str
  @param cluster_name: the cluster name, needed by the SshRunner
      constructor
  @rtype: L{ssh.SshRunner}
  @return: an SshRunner instance

  """
  return ssh.SshRunner(cluster_name)
210
211
def _Decompress(data):
  """Unpacks data compressed by the RPC client.

  @type data: list or tuple
  @param data: (encoding, content) pair sent by the RPC client
  @rtype: str
  @return: Decompressed data

  """
  assert isinstance(data, (list, tuple)) and len(data) == 2
  (encoding, content) = data
  if encoding == constants.RPC_ENCODING_NONE:
    return content
  if encoding == constants.RPC_ENCODING_ZLIB_BASE64:
    # content is base64-wrapped zlib-compressed data
    return zlib.decompress(base64.b64decode(content))
  raise AssertionError("Unknown data encoding")
230
231
def _CleanDirectory(path, exclude=None):
  """Removes all regular files in a directory.

  @type path: str
  @param path: the directory to clean
  @type exclude: list
  @param exclude: list of files to be excluded, defaults to the empty list

  """
  if path not in _ALLOWED_CLEAN_DIRS:
    _Fail("Path passed to _CleanDirectory not in allowed clean targets: '%s'",
          path)

  if not os.path.isdir(path):
    return

  # normalize the exclusion list so it matches the joined paths below
  excluded = [os.path.normpath(entry) for entry in (exclude or [])]

  for rel_entry in utils.ListVisibleFiles(path):
    full_path = utils.PathJoin(path, rel_entry)
    if full_path in excluded:
      continue
    # only plain files are removed; symlinks and directories are kept
    if os.path.isfile(full_path) and not os.path.islink(full_path):
      utils.RemoveFile(full_path)
260
261
def _BuildUploadFileList():
  """Compute the set of files accepted for upload.

  Abstracted into a function so the set is built only once, at module
  import time.

  """
  upload_ok = set([
    pathutils.CLUSTER_CONF_FILE,
    pathutils.ETC_HOSTS,
    pathutils.SSH_KNOWN_HOSTS_FILE,
    pathutils.VNC_PASSWORD_FILE,
    pathutils.RAPI_CERT_FILE,
    pathutils.SPICE_CERT_FILE,
    pathutils.SPICE_CACERT_FILE,
    pathutils.RAPI_USERS_FILE,
    pathutils.CONFD_HMAC_KEY,
    pathutils.CLUSTER_DOMAIN_SECRET_FILE,
    ])

  # each hypervisor contributes its own ancillary files
  for hv_name in constants.HYPER_TYPES:
    upload_ok.update(
      hypervisor.GetHypervisorClass(hv_name).GetAncillaryFiles()[0])

  assert pathutils.FILE_STORAGE_PATHS_FILE not in upload_ok, \
    "Allowed file storage paths should never be uploaded via RPC"

  return frozenset(upload_ok)
289
290
#: Files which are accepted by the L{UploadFile} function
_ALLOWED_UPLOAD_FILES = _BuildUploadFileList()
292
293
def JobQueuePurge():
  """Removes job queue files and archived jobs.

  Cleans the queue directory (keeping the job queue lock file) and the
  job queue archive directory.

  @rtype: None

  """
  _CleanDirectory(pathutils.QUEUE_DIR, exclude=[pathutils.JOB_QUEUE_LOCK_FILE])
  _CleanDirectory(pathutils.JOB_QUEUE_ARCHIVE_DIR)
303
304
def GetMasterNodeName():
  """Returns the master node name.

  @rtype: string
  @return: name of the master node
  @raise RPCFail: in case of errors

  """
  try:
    return _GetConfig().GetMasterNode()
  except errors.ConfigurationError, err:
    # exc=True makes _Fail log the full traceback as well
    _Fail("Cluster configuration incomplete: %s", err, exc=True)
317
318
def RunLocalHooks(hook_opcode, hooks_path, env_builder_fn):
  """Decorator running node-local hooks before and after the wrapped call.

  @type hook_opcode: string
  @param hook_opcode: opcode of the hook
  @type hooks_path: string
  @param hooks_path: path of the hooks
  @type env_builder_fn: function
  @param env_builder_fn: function that returns a dictionary containing the
      environment variables for the hooks; it will get all the parameters of
      the decorated function
  @raise RPCFail: in case of pre-hook failure

  """
  def decorator(fn):
    def wrapper(*args, **kwargs):
      _, myself = ssconf.GetMasterAndMyself()
      # both pre and post hooks run only on this very node
      local_nodes = ([myself], [myself])

      build_env = compat.partial(env_builder_fn, *args, **kwargs)

      cfg = _GetConfig()
      hooks_master = hooksmaster.HooksMaster(hook_opcode, hooks_path,
                                             local_nodes,
                                             HooksRunner().RunLocalHooks,
                                             None, build_env, None,
                                             logging.warning,
                                             cfg.GetClusterName(),
                                             cfg.GetMasterNode())
      hooks_master.RunPhase(constants.HOOKS_PHASE_PRE)
      result = fn(*args, **kwargs)
      hooks_master.RunPhase(constants.HOOKS_PHASE_POST)

      return result
    return wrapper
  return decorator
353
354
def _BuildMasterIpEnv(master_params, use_external_mip_script=None):
  """Builds environment variables for master IP hooks.

  @type master_params: L{objects.MasterNetworkParameters}
  @param master_params: network parameters of the master
  @type use_external_mip_script: boolean
  @param use_external_mip_script: whether to use an external master IP
      address setup script (unused, but necessary per the implementation of
      the _RunLocalHooks decorator)

  """
  # pylint: disable=W0613
  ip_version = netutils.IPAddress.GetVersionFromAddressFamily(
    master_params.ip_family)
  return {
    "MASTER_NETDEV": master_params.netdev,
    "MASTER_IP": master_params.ip,
    "MASTER_NETMASK": str(master_params.netmask),
    "CLUSTER_IP_VERSION": str(ip_version),
    }
376
377
def _RunMasterSetupScript(master_params, action, use_external_mip_script):
  """Execute the master IP address setup script.

  @type master_params: L{objects.MasterNetworkParameters}
  @param master_params: network parameters of the master
  @type action: string
  @param action: action to pass to the script; must be one of
      L{backend._MASTER_START} or L{backend._MASTER_STOP}
  @type use_external_mip_script: boolean
  @param use_external_mip_script: whether to use an external master IP
      address setup script
  @raise backend.RPCFail: if there are errors during the execution of the
      script

  """
  if use_external_mip_script:
    setup_script = pathutils.EXTERNAL_MASTER_SETUP_SCRIPT
  else:
    setup_script = pathutils.DEFAULT_MASTER_SETUP_SCRIPT

  # reset_env=True: the script sees only the variables built here
  env = _BuildMasterIpEnv(master_params)
  result = utils.RunCmd([setup_script, action], env=env, reset_env=True)

  if result.failed:
    _Fail("Failed to %s the master IP. Script return value: %s, output: '%s'" %
          (action, result.exit_code, result.output), log=True)
405
406
@RunLocalHooks(constants.FAKE_OP_MASTER_TURNUP, "master-ip-turnup",
               _BuildMasterIpEnv)
def ActivateMasterIp(master_params, use_external_mip_script):
  """Bring up the IP address of the master daemon.

  @type master_params: L{objects.MasterNetworkParameters}
  @param master_params: network parameters of the master
  @type use_external_mip_script: boolean
  @param use_external_mip_script: whether to use an external master IP
      address setup script
  @raise RPCFail: in case of errors during the IP startup

  """
  _RunMasterSetupScript(master_params, _MASTER_START, use_external_mip_script)
422
423
def StartMasterDaemons(no_voting):
  """Activate local node as master node.

  The function will start the master daemons (ganeti-masterd and ganeti-rapi).

  @type no_voting: boolean
  @param no_voting: whether to start ganeti-masterd without a node vote
      but still non-interactively
  @rtype: None
  @raise RPCFail: if the daemon-util invocation fails

  """
  if no_voting:
    daemon_args = "--no-voting --yes-do-it"
  else:
    daemon_args = ""

  env = {
    "EXTRA_LUXID_ARGS": daemon_args,
    "EXTRA_WCONFD_ARGS": daemon_args,
    }

  result = utils.RunCmd([pathutils.DAEMON_UTIL, "start-master"], env=env)
  if result.failed:
    # _Fail logs the message itself; the previous explicit logging.error
    # call produced a duplicate log entry for the same failure
    _Fail("Can't start Ganeti master: %s", result.output)
451
452
@RunLocalHooks(constants.FAKE_OP_MASTER_TURNDOWN, "master-ip-turndown",
               _BuildMasterIpEnv)
def DeactivateMasterIp(master_params, use_external_mip_script):
  """Take down the master IP on this node.

  @type master_params: L{objects.MasterNetworkParameters}
  @param master_params: network parameters of the master
  @type use_external_mip_script: boolean
  @param use_external_mip_script: whether to use an external master IP
      address setup script
  @raise RPCFail: in case of errors during the IP turndown

  """
  _RunMasterSetupScript(master_params, _MASTER_STOP, use_external_mip_script)
468
469
def StopMasterDaemons():
  """Stop the master daemons on this node.

  Stop the master daemons (ganeti-masterd and ganeti-rapi) on this node.

  @rtype: None

  """
  # TODO: log and report back to the caller the error failures; we
  # need to decide in which case we fail the RPC for this

  result = utils.RunCmd([pathutils.DAEMON_UTIL, "stop-master"])
  if not result.failed:
    return
  logging.error("Could not stop Ganeti master, command %s had exitcode %s"
                " and error %s",
                result.cmd, result.exit_code, result.output)
486
487
def ChangeMasterNetmask(old_netmask, netmask, master_ip, master_netdev):
  """Change the netmask of the master IP.

  @param old_netmask: the old value of the netmask
  @param netmask: the new value of the netmask
  @param master_ip: the master IP
  @param master_netdev: the master network device

  """
  if old_netmask == netmask:
    return

  if not netutils.IPAddress.Own(master_ip):
    _Fail("The master IP address is not up, not attempting to change its"
          " netmask")

  def _IpAddr(op, mask):
    # run "ip address <op>" for the master IP with the given netmask
    return utils.RunCmd([constants.IP_COMMAND_PATH, "address", op,
                         "%s/%s" % (master_ip, mask),
                         "dev", master_netdev, "label",
                         "%s:0" % master_netdev])

  # add the address with the new netmask first, then drop the old one
  if _IpAddr("add", netmask).failed:
    _Fail("Could not set the new netmask on the master IP address")

  if _IpAddr("del", old_netmask).failed:
    _Fail("Could not bring down the master IP address with the old netmask")
517
518
def EtcHostsModify(mode, host, ip):
  """Modify a host entry in /etc/hosts.

  @param mode: The mode to operate. Either add or remove entry
  @param host: The host to operate on
  @param ip: The ip associated with the entry
  @raise RPCFail: if the mode/parameter combination is invalid or the mode
      is unknown

  """
  # BUG FIX: the original code called RPCFail(...) bare, which only
  # constructed the exception object without raising it, silently skipping
  # all three validation branches; _Fail logs and raises properly.
  if mode == constants.ETC_HOSTS_ADD:
    if not ip:
      _Fail("Mode 'add' needs 'ip' parameter, but parameter not present")
    utils.AddHostToEtcHosts(host, ip)
  elif mode == constants.ETC_HOSTS_REMOVE:
    if ip:
      _Fail("Mode 'remove' does not allow 'ip' parameter, but parameter"
            " is present")
    utils.RemoveHostFromEtcHosts(host)
  else:
    _Fail("Mode not supported")
539
540
def LeaveCluster(modify_ssh_setup):
  """Cleans up and remove the current node.

  This function cleans up and prepares the current node to be removed
  from the cluster.

  If processing is successful, then it raises an
  L{errors.QuitGanetiException} which is used as a special case to
  shutdown the node daemon.

  @param modify_ssh_setup: boolean

  """
  _CleanDirectory(pathutils.DATA_DIR)
  _CleanDirectory(pathutils.CRYPTO_KEYS_DIR)
  JobQueuePurge()

  if modify_ssh_setup:
    try:
      (ssh_priv_key, ssh_pub_key, ssh_auth_keys) = \
        ssh.GetUserFiles(constants.SSH_LOGIN_USER)

      # drop our own public key from authorized_keys, then the key pair
      utils.RemoveAuthorizedKey(ssh_auth_keys, utils.ReadFile(ssh_pub_key))
      utils.RemoveFile(ssh_priv_key)
      utils.RemoveFile(ssh_pub_key)
    except errors.OpExecError:
      logging.exception("Error while processing ssh files")

  try:
    for secret_file in (pathutils.CONFD_HMAC_KEY,
                        pathutils.RAPI_CERT_FILE,
                        pathutils.SPICE_CERT_FILE,
                        pathutils.SPICE_CACERT_FILE,
                        pathutils.NODED_CERT_FILE):
      utils.RemoveFile(secret_file)
  except: # pylint: disable=W0702
    logging.exception("Error while removing cluster secrets")

  for daemon in (constants.CONFD, constants.MOND, constants.KVMD):
    utils.StopDaemon(daemon)

  # Raise a custom exception (handled in ganeti-noded)
  raise errors.QuitGanetiException(True, "Shutdown scheduled")
584
585
586 def _CheckStorageParams(params, num_params):
587 """Performs sanity checks for storage parameters.
588
589 @type params: list
590 @param params: list of storage parameters
591 @type num_params: int
592 @param num_params: expected number of parameters
593
594 """
595 if params is None:
596 raise errors.ProgrammerError("No storage parameters for storage"
597 " reporting is provided.")
598 if not isinstance(params, list):
599 raise errors.ProgrammerError("The storage parameters are not of type"
600 " list: '%s'" % params)
601 if not len(params) == num_params:
602 raise errors.ProgrammerError("Did not receive the expected number of"
603 "storage parameters: expected %s,"
604 " received '%s'" % (num_params, len(params)))
605
606
def _CheckLvmStorageParams(params):
  """Performs sanity check for the 'exclusive storage' flag.

  @see: C{_CheckStorageParams}

  """
  _CheckStorageParams(params, 1)
  (excl_stor,) = params
  if not isinstance(excl_stor, bool):
    raise errors.ProgrammerError("Exclusive storage parameter is not"
                                 " boolean: '%s'." % excl_stor)
  return excl_stor
619
620
def _GetLvmVgSpaceInfo(name, params):
  """Wrapper around C{_GetVgInfo} which checks the storage parameters.

  @type name: string
  @param name: name of the volume group
  @type params: list
  @param params: storage parameters; here it should hold only the
      exclusive storage flag

  """
  return _GetVgInfo(name, _CheckLvmStorageParams(params))
633
634
def _GetVgInfo(
    name, excl_stor, info_fn=bdev.LogicalVolume.GetVGInfo):
  """Retrieves information about a LVM volume group.

  """
  # TODO: GetVGInfo supports returning information for multiple VGs at once
  vginfo = info_fn([name], excl_stor)
  if vginfo:
    # the first two fields of the first entry are free/total space
    first = vginfo[0]
    vg_free = int(round(first[0], 0))
    vg_size = int(round(first[1], 0))
  else:
    vg_free = None
    vg_size = None

  return {
    "type": constants.ST_LVM_VG,
    "name": name,
    "storage_free": vg_free,
    "storage_size": vg_size,
    }
655
656
def _GetLvmPvSpaceInfo(name, params):
  """Wrapper around C{_GetVgSpindlesInfo} with sanity checks.

  @see: C{_GetLvmVgSpaceInfo}

  """
  return _GetVgSpindlesInfo(name, _CheckLvmStorageParams(params))
665
666
def _GetVgSpindlesInfo(
    name, excl_stor, info_fn=bdev.LogicalVolume.GetVgSpindlesInfo):
  """Retrieves information about spindles in an LVM volume group.

  @type name: string
  @param name: VG name
  @type excl_stor: bool
  @param excl_stor: exclusive storage
  @rtype: dict
  @return: dictionary whose keys are "name", "vg_free", "vg_size" for VG
      name, free spindles, total spindles respectively

  """
  # spindle counts are only queried when exclusive storage is enabled
  if excl_stor:
    (vg_free, vg_size) = info_fn(name)
  else:
    vg_free = vg_size = 0
  return {
    "type": constants.ST_LVM_PV,
    "name": name,
    "storage_free": vg_free,
    "storage_size": vg_size,
    }
691
692
def _GetHvInfo(name, hvparams, get_hv_fn=hypervisor.GetHypervisor):
  """Retrieves node information from a hypervisor.

  The information returned depends on the hypervisor. Common items:

    - vg_size is the size of the configured volume group in MiB
    - vg_free is the free size of the volume group in MiB
    - memory_dom0 is the memory allocated for domain0 in MiB
    - memory_free is the currently available (free) ram in MiB
    - memory_total is the total number of ram in MiB
    - hv_version: the hypervisor version, if available

  @type hvparams: dict of string
  @param hvparams: the hypervisor's hvparams

  """
  hv = get_hv_fn(name)
  return hv.GetNodeInfo(hvparams=hvparams)
710
711
def _GetHvInfoAll(hv_specs, get_hv_fn=hypervisor.GetHypervisor):
  """Retrieves node information for all hypervisors.

  See C{_GetHvInfo} for information on the output.

  @type hv_specs: list of pairs (string, dict of strings)
  @param hv_specs: list of pairs of a hypervisor's name and its hvparams

  """
  if hv_specs is None:
    return None

  return [_GetHvInfo(hvname, hvparams, get_hv_fn)
          for (hvname, hvparams) in hv_specs]
728
729
def _GetNamedNodeInfo(names, fn):
  """Calls C{fn} for all names in C{names} and returns the results.

  @rtype: None or list
  @return: None if C{names} is None, otherwise the list of C{fn} results
      (note: C{map} returns a list under Python 2)

  """
  if names is None:
    return None
  else:
    return map(fn, names)
740
741
def GetNodeInfo(storage_units, hv_specs):
  """Gives back a hash with different information about the node.

  @type storage_units: list of tuples (string, string, list)
  @param storage_units: List of tuples (storage unit, identifier, parameters)
      to ask for disk space information. In case of lvm-vg, the identifier
      is the VG name. The parameters can contain additional,
      storage-type-specific parameters, for example exclusive storage for
      lvm storage.
  @type hv_specs: list of pairs (string, dict of strings)
  @param hv_specs: list of pairs of a hypervisor's name and its hvparams
  @rtype: tuple; (string, None/dict, None/dict)
  @return: Tuple containing boot ID, volume group information and hypervisor
      information

  """
  def _ReportStorageUnit(unit):
    # each unit is a (storage_type, storage_key, storage_params) triple;
    # this replaces the former Python-2-only tuple-parameter lambda
    (storage_type, storage_key, storage_params) = unit
    return _ApplyStorageInfoFunction(storage_type, storage_key,
                                     storage_params)

  bootid = utils.ReadFile(_BOOT_ID_PATH, size=128).rstrip("\n")
  storage_info = _GetNamedNodeInfo(storage_units, _ReportStorageUnit)
  hv_info = _GetHvInfoAll(hv_specs)
  return (bootid, storage_info, hv_info)
764
765
def _GetFileStorageSpaceInfo(path, params):
  """Wrapper around filestorage.GetSpaceInfo.

  The purpose of this wrapper is to call filestorage.GetFileStorageSpaceInfo
  and ignore the *args parameter to not leak it into the filestorage
  module's code.

  @see: C{filestorage.GetFileStorageSpaceInfo} for description of the
      parameters.

  """
  # file storage takes no extra storage parameters
  _CheckStorageParams(params, 0)
  return filestorage.GetFileStorageSpaceInfo(path)
779
780
# FIXME: implement storage reporting for all missing storage types.
#: Maps each storage type to its space-reporting function; None means
#: space reporting is not implemented for that type yet (see
#: L{_ApplyStorageInfoFunction}, which raises NotImplementedError then)
_STORAGE_TYPE_INFO_FN = {
  constants.ST_BLOCK: None,
  constants.ST_DISKLESS: None,
  constants.ST_EXT: None,
  constants.ST_FILE: _GetFileStorageSpaceInfo,
  constants.ST_LVM_PV: _GetLvmPvSpaceInfo,
  constants.ST_LVM_VG: _GetLvmVgSpaceInfo,
  constants.ST_SHARED_FILE: None,
  constants.ST_GLUSTER: None,
  constants.ST_RADOS: None,
  }
793
794
def _ApplyStorageInfoFunction(storage_type, storage_key, *args):
  """Looks up and applies the correct function to calculate free and total
  storage for the given storage type.

  @type storage_type: string
  @param storage_type: the storage type for which the storage shall be
      reported.
  @type storage_key: string
  @param storage_key: identifier of a storage unit, e.g. the volume group
      name of an LVM storage unit
  @type args: any
  @param args: various parameters that can be used for storage reporting.
      These parameters and their semantics vary from storage type to
      storage type and are just propagated in this function.
  @return: the results of the application of the storage space function
      (see _STORAGE_TYPE_INFO_FN) if storage space reporting is implemented
      for that storage type
  @raises NotImplementedError: for storage types who don't support space
      reporting yet

  """
  report_fn = _STORAGE_TYPE_INFO_FN[storage_type]
  if report_fn is None:
    raise NotImplementedError
  return report_fn(storage_key, *args)
819
820
821 def _CheckExclusivePvs(pvi_list):
822 """Check that PVs are not shared among LVs
823
824 @type pvi_list: list of L{objects.LvmPvInfo} objects
825 @param pvi_list: information about the PVs
826
827 @rtype: list of tuples (string, list of strings)
828 @return: offending volumes, as tuples: (pv_name, [lv1_name, lv2_name...])
829
830 """
831 res = []
832 for pvi in pvi_list:
833 if len(pvi.lv_list) > 1:
834 res.append((pvi.name, pvi.lv_list))
835 return res
836
837
def _VerifyHypervisors(what, vm_capable, result, all_hvparams,
                       get_hv_fn=hypervisor.GetHypervisor):
  """Verifies the hypervisor. Appends the results to the 'results' list.

  @type what: C{dict}
  @param what: a dictionary of things to check
  @type vm_capable: boolean
  @param vm_capable: whether or not this node is vm capable
  @type result: dict
  @param result: dictionary of verification results; results of the
    verifications in this function will be added here
  @type all_hvparams: dict of dict of string
  @param all_hvparams: dictionary mapping hypervisor names to hvparams
  @type get_hv_fn: function
  @param get_hv_fn: function to retrieve the hypervisor, to improve
    testability

  """
  if not vm_capable:
    return

  if constants.NV_HYPERVISOR in what:
    result[constants.NV_HYPERVISOR] = {}
    for hv_name in what[constants.NV_HYPERVISOR]:
      hvparams = all_hvparams[hv_name]
      try:
        val = get_hv_fn(hv_name).Verify(hvparams=hvparams)
      except errors.HypervisorError, err:
        # report the error text as the result rather than aborting the
        # whole node verification
        val = "Error while checking hypervisor: %s" % str(err)
      result[constants.NV_HYPERVISOR][hv_name] = val
867
868
def _VerifyHvparams(what, vm_capable, result,
                    get_hv_fn=hypervisor.GetHypervisor):
  """Verifies the hvparams. Appends the results to the 'results' list.

  @type what: C{dict}
  @param what: a dictionary of things to check
  @type vm_capable: boolean
  @param vm_capable: whether or not this node is vm capable
  @type result: dict
  @param result: dictionary of verification results; results of the
    verifications in this function will be added here
  @type get_hv_fn: function
  @param get_hv_fn: function to retrieve the hypervisor, to improve
    testability

  """
  if not vm_capable:
    return

  if constants.NV_HVPARAMS in what:
    result[constants.NV_HVPARAMS] = []
    for source, hv_name, hvparms in what[constants.NV_HVPARAMS]:
      try:
        logging.info("Validating hv %s, %s", hv_name, hvparms)
        get_hv_fn(hv_name).ValidateParameters(hvparms)
      except errors.HypervisorError, err:
        # collect the validation failure instead of aborting the check
        result[constants.NV_HVPARAMS].append((source, hv_name, str(err)))
895
896
def _VerifyInstanceList(what, vm_capable, result, all_hvparams):
  """Verifies the instance list.

  @type what: C{dict}
  @param what: a dictionary of things to check
  @type vm_capable: boolean
  @param vm_capable: whether or not this node is vm capable
  @type result: dict
  @param result: dictionary of verification results; results of the
    verifications in this function will be added here
  @type all_hvparams: dict of dict of string
  @param all_hvparams: dictionary mapping hypervisor names to hvparams

  """
  if constants.NV_INSTANCELIST in what and vm_capable:
    # GetInstanceList can fail
    try:
      val = GetInstanceList(what[constants.NV_INSTANCELIST],
                            all_hvparams=all_hvparams)
    except RPCFail, err:
      # report the failure text as the result instead of propagating it
      val = str(err)
    result[constants.NV_INSTANCELIST] = val
919
920
def _VerifyNodeInfo(what, vm_capable, result, all_hvparams):
  """Verifies the node info.

  @type what: C{dict}
  @param what: a dictionary of things to check
  @type vm_capable: boolean
  @param vm_capable: whether or not this node is vm capable
  @type result: dict
  @param result: dictionary of verification results; results of the
      verifications in this function will be added here
  @type all_hvparams: dict of dict of string
  @param all_hvparams: dictionary mapping hypervisor names to hvparams

  """
  if constants.NV_HVINFO not in what or not vm_capable:
    return

  hvname = what[constants.NV_HVINFO]
  hyper = hypervisor.GetHypervisor(hvname)
  result[constants.NV_HVINFO] = \
    hyper.GetNodeInfo(hvparams=all_hvparams[hvname])
940
941
def _VerifyClientCertificate(cert_file=pathutils.NODED_CLIENT_CERT_FILE):
  """Verify the existance and validity of the client SSL certificate.

  Also, verify that the client certificate is not self-signed. Self-
  signed client certificates stem from Ganeti versions 2.12.0 - 2.12.4
  and should be replaced by client certificates signed by the server
  certificate. Hence we output a warning when we encounter a self-signed
  one.

  """
  create_cert_cmd = "gnt-cluster renew-crypto --new-node-certificates"
  if not os.path.exists(cert_file):
    return (constants.CV_ERROR,
            "The client certificate does not exist. Run '%s' to create"
            " client certificates for all nodes." % create_cert_cmd)

  # run the validity check first, then the self-signedness check; either
  # one returning a non-None error code aborts with that result
  for check_fn in (utils.VerifyCertificate, utils.IsCertificateSelfSigned):
    (errcode, msg) = check_fn(cert_file)
    if errcode is not None:
      return (errcode, msg)

  # if everything is fine, we return the digest to be compared to the config
  return (None, utils.GetCertificateDigest(cert_filename=cert_file))
968
969
def VerifyNode(what, cluster_name, all_hvparams, node_groups, groups_cfg):
  """Verify the status of the local node.

  Based on the input L{what} parameter, various checks are done on the
  local node.

  If the I{filelist} key is present, this list of
  files is checksummed and the file/checksum pairs are returned.

  If the I{nodelist} key is present, we check that we have
  connectivity via ssh with the target nodes (and check the hostname
  report).

  If the I{node-net-test} key is present, we check that we have
  connectivity to the given nodes via both primary IP and, if
  applicable, secondary IPs.

  @type what: C{dict}
  @param what: a dictionary of things to check:
      - filelist: list of files for which to compute checksums
      - nodelist: list of nodes we should check ssh communication with
      - node-net-test: list of nodes we should check node daemon port
        connectivity with
      - hypervisor: list with hypervisors to run the verify for
  @type cluster_name: string
  @param cluster_name: the cluster's name
  @type all_hvparams: dict of dict of strings
  @param all_hvparams: a dictionary mapping hypervisor names to hvparams
  @type node_groups: a dict of strings
  @param node_groups: node _names_ mapped to their group uuids (it's enough to
      have only those nodes that are in `what["nodelist"]`)
  @type groups_cfg: a dict of dict of strings
  @param groups_cfg: a dictionary mapping group uuids to their configuration
  @rtype: dict
  @return: a dictionary with the same keys as the input dict, and
      values representing the result of the checks

  """
  result = {}
  my_name = netutils.Hostname.GetSysName()
  port = netutils.GetDaemonPort(constants.NODED)
  # nodes listed under NV_NONVMNODES skip all vm-related checks below
  vm_capable = my_name not in what.get(constants.NV_NONVMNODES, [])

  _VerifyHypervisors(what, vm_capable, result, all_hvparams)
  _VerifyHvparams(what, vm_capable, result)

  if constants.NV_FILELIST in what:
    # paths travel over the wire as virtual paths, so localize them before
    # hashing and convert them back to virtual paths in the reply
    fingerprints = utils.FingerprintFiles(map(vcluster.LocalizeVirtualPath,
                                              what[constants.NV_FILELIST]))
    result[constants.NV_FILELIST] = \
      dict((vcluster.MakeVirtualPath(key), value)
           for (key, value) in fingerprints.items())

  if constants.NV_CLIENT_CERT in what:
    result[constants.NV_CLIENT_CERT] = _VerifyClientCertificate()

  if constants.NV_NODELIST in what:
    (nodes, bynode) = what[constants.NV_NODELIST]

    # Add nodes from other groups (different for each node)
    try:
      nodes.extend(bynode[my_name])
    except KeyError:
      pass

    # Use a random order
    random.shuffle(nodes)

    # Try to contact all nodes
    val = {}
    for node in nodes:
      # the SSH port can differ per node group
      params = groups_cfg.get(node_groups.get(node))
      ssh_port = params["ndparams"].get(constants.ND_SSH_PORT)
      logging.debug("Ssh port %s (None = default) for node %s",
                    str(ssh_port), node)
      success, message = _GetSshRunner(cluster_name). \
                            VerifyNodeHostname(node, ssh_port)
      if not success:
        val[node] = message

    result[constants.NV_NODELIST] = val

  if constants.NV_NODENETTEST in what:
    result[constants.NV_NODENETTEST] = tmp = {}
    # first locate our own primary/secondary IP in the test list
    my_pip = my_sip = None
    for name, pip, sip in what[constants.NV_NODENETTEST]:
      if name == my_name:
        my_pip = pip
        my_sip = sip
        break
    if not my_pip:
      tmp[my_name] = ("Can't find my own primary/secondary IP"
                      " in the node list")
    else:
      # ping each node on its primary IP and, if distinct, its secondary IP
      for name, pip, sip in what[constants.NV_NODENETTEST]:
        fail = []
        if not netutils.TcpPing(pip, port, source=my_pip):
          fail.append("primary")
        if sip != pip:
          if not netutils.TcpPing(sip, port, source=my_sip):
            fail.append("secondary")
        if fail:
          tmp[name] = ("failure using the %s interface(s)" %
                       " and ".join(fail))

  if constants.NV_MASTERIP in what:
    # FIXME: add checks on incoming data structures (here and in the
    # rest of the function)
    master_name, master_ip = what[constants.NV_MASTERIP]
    if master_name == my_name:
      # on the master itself, the master IP must answer on loopback
      source = constants.IP4_ADDRESS_LOCALHOST
    else:
      source = None
    result[constants.NV_MASTERIP] = netutils.TcpPing(master_ip, port,
                                                     source=source)

  if constants.NV_USERSCRIPTS in what:
    # report only the user scripts that are not executable
    result[constants.NV_USERSCRIPTS] = \
      [script for script in what[constants.NV_USERSCRIPTS]
       if not utils.IsExecutable(script)]

  if constants.NV_OOB_PATHS in what:
    result[constants.NV_OOB_PATHS] = tmp = []
    for path in what[constants.NV_OOB_PATHS]:
      try:
        st = os.stat(path)
      except OSError, err:
        tmp.append("error stating out of band helper: %s" % err)
      else:
        if stat.S_ISREG(st.st_mode):
          if stat.S_IMODE(st.st_mode) & stat.S_IXUSR:
            # None marks a valid out-of-band helper
            tmp.append(None)
          else:
            tmp.append("out of band helper %s is not executable" % path)
        else:
          tmp.append("out of band helper %s is not a file" % path)

  if constants.NV_LVLIST in what and vm_capable:
    try:
      val = GetVolumeList(utils.ListVolumeGroups().keys())
    except RPCFail, err:
      # report LVM errors as a string instead of failing the whole verify
      val = str(err)
    result[constants.NV_LVLIST] = val

  _VerifyInstanceList(what, vm_capable, result, all_hvparams)

  if constants.NV_VGLIST in what and vm_capable:
    result[constants.NV_VGLIST] = utils.ListVolumeGroups()

  if constants.NV_PVLIST in what and vm_capable:
    check_exclusive_pvs = constants.NV_EXCLUSIVEPVS in what
    val = bdev.LogicalVolume.GetPVInfo(what[constants.NV_PVLIST],
                                       filter_allocatable=False,
                                       include_lvs=check_exclusive_pvs)
    if check_exclusive_pvs:
      result[constants.NV_EXCLUSIVEPVS] = _CheckExclusivePvs(val)
      for pvi in val:
        # Avoid sending useless data on the wire
        pvi.lv_list = []
    result[constants.NV_PVLIST] = map(objects.LvmPvInfo.ToDict, val)

  if constants.NV_VERSION in what:
    result[constants.NV_VERSION] = (constants.PROTOCOL_VERSION,
                                    constants.RELEASE_VERSION)

  _VerifyNodeInfo(what, vm_capable, result, all_hvparams)

  if constants.NV_DRBDVERSION in what and vm_capable:
    try:
      drbd_version = DRBD8.GetProcInfo().GetVersionString()
    except errors.BlockDeviceError, err:
      logging.warning("Can't get DRBD version", exc_info=True)
      drbd_version = str(err)
    result[constants.NV_DRBDVERSION] = drbd_version

  if constants.NV_DRBDLIST in what and vm_capable:
    try:
      used_minors = drbd.DRBD8.GetUsedDevs()
    except errors.BlockDeviceError, err:
      logging.warning("Can't get used minors list", exc_info=True)
      used_minors = str(err)
    result[constants.NV_DRBDLIST] = used_minors

  if constants.NV_DRBDHELPER in what and vm_capable:
    status = True
    try:
      payload = drbd.DRBD8.GetUsermodeHelper()
    except errors.BlockDeviceError, err:
      logging.error("Can't get DRBD usermode helper: %s", str(err))
      status = False
      payload = str(err)
    result[constants.NV_DRBDHELPER] = (status, payload)

  if constants.NV_NODESETUP in what:
    result[constants.NV_NODESETUP] = tmpr = []
    if not os.path.isdir("/sys/block") or not os.path.isdir("/sys/class/net"):
      tmpr.append("The sysfs filesytem doesn't seem to be mounted"
                  " under /sys, missing required directories /sys/block"
                  " and /sys/class/net")
    if (not os.path.isdir("/proc/sys") or
        not os.path.isfile("/proc/sysrq-trigger")):
      tmpr.append("The procfs filesystem doesn't seem to be mounted"
                  " under /proc, missing required directory /proc/sys and"
                  " the file /proc/sysrq-trigger")

  if constants.NV_TIME in what:
    result[constants.NV_TIME] = utils.SplitTime(time.time())

  if constants.NV_OSLIST in what and vm_capable:
    result[constants.NV_OSLIST] = DiagnoseOS()

  if constants.NV_BRIDGES in what and vm_capable:
    # report only the bridges that are missing on this node
    result[constants.NV_BRIDGES] = [bridge
                                    for bridge in what[constants.NV_BRIDGES]
                                    if not utils.BridgeExists(bridge)]

  if what.get(constants.NV_ACCEPTED_STORAGE_PATHS) == my_name:
    result[constants.NV_ACCEPTED_STORAGE_PATHS] = \
      filestorage.ComputeWrongFileStoragePaths()

  if what.get(constants.NV_FILE_STORAGE_PATH):
    pathresult = filestorage.CheckFileStoragePath(
        what[constants.NV_FILE_STORAGE_PATH])
    if pathresult:
      result[constants.NV_FILE_STORAGE_PATH] = pathresult

  if what.get(constants.NV_SHARED_FILE_STORAGE_PATH):
    pathresult = filestorage.CheckFileStoragePath(
        what[constants.NV_SHARED_FILE_STORAGE_PATH])
    if pathresult:
      result[constants.NV_SHARED_FILE_STORAGE_PATH] = pathresult

  return result
1203
1204
def GetCryptoTokens(token_requests):
  """Perform actions on the node's cryptographic tokens.

  Token types can be 'ssl' or 'ssh'. So far only some actions are implemented
  for 'ssl'. Action 'get' returns the digest of the public client ssl
  certificate. Action 'create' creates a new client certificate and private key
  and also returns the digest of the certificate. The third parameter of a
  token request are optional parameters for the actions, so far only the
  filename is supported.

  @type token_requests: list of tuples of (string, string, dict), where the
    first string is in constants.CRYPTO_TYPES, the second in
    constants.CRYPTO_ACTIONS. The third parameter is a dictionary of string
    to string.
  @param token_requests: list of requests of cryptographic tokens and actions
    to perform on them. The actions come with a dictionary of options.
  @rtype: list of tuples (string, string)
  @return: list of tuples of the token type and the public crypto token

  """
  reply = []
  for (token_type, action, _) in token_requests:
    # unknown types or actions are a bug on the caller's side
    if token_type not in constants.CRYPTO_TYPES:
      raise errors.ProgrammerError("Token type '%s' not supported." %
                                   token_type)
    if action not in constants.CRYPTO_ACTIONS:
      raise errors.ProgrammerError("Action '%s' is not supported." %
                                   action)
    if token_type == constants.CRYPTO_TYPE_SSL_DIGEST:
      reply.append((token_type, utils.GetCertificateDigest()))
  return reply
1237
1238
def EnsureDaemon(daemon_name, run):
  """Ensures the given daemon is running or stopped.

  @type daemon_name: string
  @param daemon_name: name of the daemon (e.g., constants.KVMD)

  @type run: bool
  @param run: whether to start or stop the daemon

  @rtype: bool
  @return: 'True' if daemon successfully started/stopped,
      'False' otherwise

  """
  # only a whitelisted set of daemons may be controlled this way
  if daemon_name not in [constants.KVMD]:
    return False

  if run:
    return utils.EnsureDaemon(daemon_name)
  else:
    return utils.StopDaemon(daemon_name)
1263
1264
1265 def GetBlockDevSizes(devices):
1266 """Return the size of the given block devices
1267
1268 @type devices: list
1269 @param devices: list of block device nodes to query
1270 @rtype: dict
1271 @return:
1272 dictionary of all block devices under /dev (key). The value is their
1273 size in MiB.
1274
1275 {'/dev/disk/by-uuid/123456-12321231-312312-312': 124}
1276
1277 """
1278 DEV_PREFIX = "/dev/"
1279 blockdevs = {}
1280
1281 for devpath in devices:
1282 if not utils.IsBelowDir(DEV_PREFIX, devpath):
1283 continue
1284
1285 try:
1286 st = os.stat(devpath)
1287 except EnvironmentError, err:
1288 logging.warning("Error stat()'ing device %s: %s", devpath, str(err))
1289 continue
1290
1291 if stat.S_ISBLK(st.st_mode):
1292 result = utils.RunCmd(["blockdev", "--getsize64", devpath])
1293 if result.failed:
1294 # We don't want to fail, just do not list this device as available
1295 logging.warning("Cannot get size for block device %s", devpath)
1296 continue
1297
1298 size = int(result.stdout) / (1024 * 1024)
1299 blockdevs[devpath] = size
1300 return blockdevs
1301
1302
def GetVolumeList(vg_names):
  """Compute list of logical volumes and their size.

  @type vg_names: list
  @param vg_names: the volume groups whose LVs we should list, or
      empty for all volume groups
  @rtype: dict
  @return:
    dictionary of all partions (key) with value being a tuple of
    their size (in MiB), inactive and online status::

      {'xenvg/test1': ('20.06', True, True)}

    in case of errors, a string is returned with the error
    details.

  """
  sep = "|"
  result = utils.RunCmd(["lvs", "--noheadings", "--units=m", "--nosuffix",
                         "--separator=%s" % sep,
                         "-ovg_name,lv_name,lv_size,lv_attr"] +
                        (vg_names or []))
  if result.failed:
    _Fail("Failed to list logical volumes, lvs output: %s", result.output)

  lvs = {}
  for raw_line in result.stdout.splitlines():
    line = raw_line.strip()
    match = _LVSLINE_REGEX.match(line)
    if not match:
      logging.error("Invalid line returned from lvs output: '%s'", line)
      continue
    (vg_name, lv_name, size, attr) = match.groups()
    if attr[0] == "v":
      # we don't want to report virtual volumes as existing, since they
      # don't really hold data
      continue
    # attr[4] is the state flag, attr[5] the device-open flag
    lvs["%s/%s" % (vg_name, lv_name)] = (size, attr[4] == "-", attr[5] == "o")

  return lvs
1347
1348
def ListVolumeGroups():
  """List the volume groups and their size.

  Thin RPC-facing wrapper around L{utils.ListVolumeGroups}.

  @rtype: dict
  @return: dictionary with keys volume name and values the
      size of the volume

  """
  return utils.ListVolumeGroups()
1358
1359
def NodeVolumes():
  """List all volumes on this node.

  @rtype: list
  @return:
    A list of dictionaries, each having four keys:
      - name: the logical volume name,
      - size: the size of the logical volume
      - dev: the physical device on which the LV lives
      - vg: the volume group to which it belongs

    In case of errors, we return an empty list and log the
    error.

    Note that since a logical volume can live on multiple physical
    volumes, the resulting list might include a logical volume
    multiple times.

  """
  result = utils.RunCmd(["lvs", "--noheadings", "--units=m", "--nosuffix",
                         "--separator=|",
                         "--options=lv_name,lv_size,devices,vg_name"])
  if result.failed:
    _Fail("Failed to list logical volumes, lvs output: %s",
          result.output)

  volumes = []
  for line in result.stdout.splitlines():
    if line.count("|") < 3:
      logging.warning("Strange line in the output from lvs: '%s'", line)
      continue
    fields = [field.strip() for field in line.split("|")]
    # the devices field is a comma-separated list of "pv(extent)" entries;
    # emit one record per physical device backing the LV
    for dev_entry in fields[2].split(","):
      volumes.append({
        "name": fields[0],
        "size": fields[1],
        "dev": dev_entry.split("(")[0],
        "vg": fields[3],
        })

  return volumes
1404
1405
def BridgesExist(bridges_list):
  """Check if a list of bridges exist on the current node.

  @rtype: boolean
  @return: C{True} if all of them exist, C{False} otherwise

  """
  missing = [bridge for bridge in bridges_list
             if not utils.BridgeExists(bridge)]
  if missing:
    _Fail("Missing bridges %s", utils.CommaJoin(missing))
1420
1421
1422 def GetInstanceListForHypervisor(hname, hvparams=None,
1423 get_hv_fn=hypervisor.GetHypervisor):
1424 """Provides a list of instances of the given hypervisor.
1425
1426 @type hname: string
1427 @param hname: name of the hypervisor
1428 @type hvparams: dict of strings
1429 @param hvparams: hypervisor parameters for the given hypervisor
1430 @type get_hv_fn: function
1431 @param get_hv_fn: function that returns a hypervisor for the given hypervisor
1432 name; optional parameter to increase testability
1433
1434 @rtype: list
1435 @return: a list of all running instances on the current node
1436 - instance1.example.com
1437 - instance2.example.com
1438
1439 """
1440 try:
1441 return get_hv_fn(hname).ListInstances(hvparams=hvparams)
1442 except errors.HypervisorError, err:
1443 _Fail("Error enumerating instances (hypervisor %s): %s",
1444 hname, err, exc=True)
1445
1446
def GetInstanceList(hypervisor_list, all_hvparams=None,
                    get_hv_fn=hypervisor.GetHypervisor):
  """Provides a list of instances.

  @type hypervisor_list: list
  @param hypervisor_list: the list of hypervisors to query information
  @type all_hvparams: dict of dict of strings
  @param all_hvparams: a dictionary mapping hypervisor types to respective
    cluster-wide hypervisor parameters
  @type get_hv_fn: function
  @param get_hv_fn: function that returns a hypervisor for the given hypervisor
    name; optional parameter to increase testability

  @rtype: list
  @return: a list of all running instances on the current node
    - instance1.example.com
    - instance2.example.com

  """
  # concatenate the per-hypervisor instance lists
  instances = []
  for hname in hypervisor_list:
    instances.extend(
      GetInstanceListForHypervisor(hname, hvparams=all_hvparams[hname],
                                   get_hv_fn=get_hv_fn))
  return instances
1472
1473
def GetInstanceInfo(instance, hname, hvparams=None):
  """Gives back the information about an instance as a dictionary.

  @type instance: string
  @param instance: the instance name
  @type hname: string
  @param hname: the hypervisor type of the instance
  @type hvparams: dict of strings
  @param hvparams: the instance's hvparams

  @rtype: dict
  @return: dictionary with the following keys:
      - memory: memory size of instance (int)
      - state: state of instance (HvInstanceState)
      - time: cpu time of instance (float)
      - vcpus: the number of vcpus (int)

  """
  hyper = hypervisor.GetHypervisor(hname)
  iinfo = hyper.GetInstanceInfo(instance, hvparams=hvparams)

  # an unknown instance yields an empty dict
  if iinfo is None:
    return {}

  return {
    "memory": iinfo[2],
    "vcpus": iinfo[3],
    "state": iinfo[4],
    "time": iinfo[5],
    }
1503
1504
def GetInstanceMigratable(instance):
  """Computes whether an instance can be migrated.

  @type instance: L{objects.Instance}
  @param instance: object representing the instance to be checked.

  @rtype: None
  @return: None; if the instance is not running on this node, the check
    is aborted via L{_Fail}. A missing block device symlink is only
    logged as a warning and does not prevent migration.

  """
  hyper = hypervisor.GetHypervisor(instance.hypervisor)
  iname = instance.name
  if iname not in hyper.ListInstances(hvparams=instance.hvparams):
    _Fail("Instance %s is not running", iname)

  # a missing disk symlink is reported but not treated as fatal
  for idx in range(len(instance.disks_info)):
    link_name = _GetBlockDevSymlinkPath(iname, idx)
    if not os.path.islink(link_name):
      logging.warning("Instance %s is missing symlink %s for disk %d",
                      iname, link_name, idx)
1527
1528
def GetAllInstancesInfo(hypervisor_list, all_hvparams):
  """Gather data about all instances.

  This is the equivalent of L{GetInstanceInfo}, except that it
  computes data for all instances at once, thus being faster if one
  needs data about more than one instance.

  @type hypervisor_list: list
  @param hypervisor_list: list of hypervisors to query for instance data
  @type all_hvparams: dict of dict of strings
  @param all_hvparams: mapping of hypervisor names to hvparams

  @rtype: dict
  @return: dictionary of instance: data, with data having the following keys:
      - memory: memory size of instance (int)
      - state: xen state of instance (string)
      - time: cpu time of instance (float)
      - vcpus: the number of vcpus

  """
  output = {}
  for hname in hypervisor_list:
    iinfo = hypervisor.GetHypervisor(hname).GetAllInstancesInfo(
        all_hvparams[hname])
    if not iinfo:
      continue
    for (name, _, memory, vcpus, state, times) in iinfo:
      data = {
        "memory": memory,
        "vcpus": vcpus,
        "state": state,
        "time": times,
        }
      previous = output.get(name)
      if previous is not None:
        # we only check static parameters, like memory and vcpus,
        # and not state and time which can change between the
        # invocations of the different hypervisors
        if any(data[key] != previous[key] for key in ("memory", "vcpus")):
          _Fail("Instance %s is running twice"
                " with different parameters", name)
      output[name] = data

  return output
1572
1573
def GetInstanceConsoleInfo(instance_param_dict,
                           get_hv_fn=hypervisor.GetHypervisor):
  """Gather data about the console access of a set of instances of this node.

  This function assumes that the caller already knows which instances are on
  this node, by calling a function such as L{GetAllInstancesInfo} or
  L{GetInstanceList}.

  For every instance, a large amount of configuration data needs to be
  provided to the hypervisor interface in order to receive the console
  information. Whether this could or should be cut down can be discussed.
  The information is provided in a dictionary indexed by instance name,
  allowing any number of instance queries to be done.

  @type instance_param_dict: dict of string to tuple of dictionaries, where the
    dictionaries represent: L{objects.Instance}, L{objects.Node},
    L{objects.NodeGroup}, HvParams, BeParams
  @param instance_param_dict: mapping of instance name to parameters necessary
    for console information retrieval

  @rtype: dict
  @return: dictionary of instance: data, with data having the following keys:
      - instance: instance name
      - kind: console kind
      - message: used with kind == CONS_MESSAGE, indicates console to be
                 unavailable, supplies error message
      - host: host to connect to
      - port: port to use
      - user: user for login
      - command: the command, broken into parts as an array
      - display: unknown, potentially unused?

  """
  console_info = {}
  for inst_name, params in instance_param_dict.items():
    # rebuild the config objects from their wire representation
    instance = objects.Instance.FromDict(params["instance"])
    pnode = objects.Node.FromDict(params["node"])
    group = objects.NodeGroup.FromDict(params["group"])

    hyper = get_hv_fn(instance.hypervisor)
    console = hyper.GetInstanceConsole(instance, pnode, group,
                                       params["hvParams"],
                                       params["beParams"])
    console_info[inst_name] = console.ToDict()

  return console_info
1625
1626
def _InstanceLogName(kind, os_name, instance, component):
  """Compute the OS log filename for a given instance and operation.

  The instance name and os name are passed in as strings since not all
  operations have these as part of an instance object.

  @type kind: string
  @param kind: the operation type (e.g. add, import, etc.)
  @type os_name: string
  @param os_name: the os name
  @type instance: string
  @param instance: the name of the instance being imported/added/etc.
  @type component: string or None
  @param component: the name of the component of the instance being
      transferred

  """
  # TODO: Use tempfile.mkstemp to create unique filename
  if component:
    assert "/" not in component
    suffix = "-%s" % component
  else:
    suffix = ""
  return utils.PathJoin(pathutils.LOG_OS_DIR,
                        "%s-%s-%s%s-%s.log" %
                        (kind, os_name, instance, suffix,
                         utils.TimestampForFilename()))
1653
1654
def InstanceOsAdd(instance, reinstall, debug):
  """Add an OS to an instance.

  @type instance: L{objects.Instance}
  @param instance: Instance whose OS is to be installed
  @type reinstall: boolean
  @param reinstall: whether this is an instance reinstall
  @type debug: integer
  @param debug: debug level, passed to the OS scripts
  @rtype: None

  """
  inst_os = OSFromDisk(instance.os)

  create_env = OSEnvironment(instance, inst_os, debug)
  if reinstall:
    create_env["INSTANCE_REINSTALL"] = "1"

  logfile = _InstanceLogName("add", instance.os, instance.name, None)

  result = utils.RunCmd([inst_os.create_script], env=create_env,
                        cwd=inst_os.path, output=logfile, reset_env=True)
  if not result.failed:
    return

  # the create script failed: log the details and abort with the tail of
  # the script's log file for easier diagnosis
  logging.error("os create command '%s' returned error: %s, logfile: %s,"
                " output: %s", result.cmd, result.fail_reason, logfile,
                result.output)
  tail = [utils.SafeEncode(val)
          for val in utils.TailFile(logfile, lines=20)]
  _Fail("OS create script failed (%s), last lines in the"
        " log file:\n%s", result.fail_reason, "\n".join(tail), log=False)
1685
1686
def RunRenameInstance(instance, old_name, debug):
  """Run the OS rename script for an instance.

  @type instance: L{objects.Instance}
  @param instance: Instance whose OS is to be renamed
  @type old_name: string
  @param old_name: previous instance name
  @type debug: integer
  @param debug: debug level, passed to the OS scripts
  @rtype: None
  @return: None; a failing rename script aborts via L{_Fail}

  """
  inst_os = OSFromDisk(instance.os)

  rename_env = OSEnvironment(instance, inst_os, debug)
  # the script reads the old name from the environment
  rename_env["OLD_INSTANCE_NAME"] = old_name

  logfile = _InstanceLogName("rename", instance.os,
                             "%s-%s" % (old_name, instance.name), None)

  result = utils.RunCmd([inst_os.rename_script], env=rename_env,
                        cwd=inst_os.path, output=logfile, reset_env=True)

  if result.failed:
    # this used to say "os create command", which was misleading when
    # diagnosing rename failures
    logging.error("os rename command '%s' returned error: %s output: %s",
                  result.cmd, result.fail_reason, result.output)
    lines = [utils.SafeEncode(val)
             for val in utils.TailFile(logfile, lines=20)]
    _Fail("OS rename script failed (%s), last lines in the"
          " log file:\n%s", result.fail_reason, "\n".join(lines), log=False)
1718
1719
def _GetBlockDevSymlinkPath(instance_name, idx, _dir=None):
  """Returns symlink path for block device.

  """
  # _dir is overridable for testing only; default to the disk links dir
  link_dir = pathutils.DISK_LINKS_DIR if _dir is None else _dir
  return utils.PathJoin(link_dir,
                        "%s%s%s" % (instance_name,
                                    constants.DISK_SEPARATOR, idx))
1730
1731
1732 def _SymlinkBlockDev(instance_name, device_path, idx):
1733 """Set up symlinks to a instance's block device.
1734
1735 This is an auxiliary function run when an instance is start (on the primary
1736 node) or when an instance is migrated (on the target node).
1737
1738
1739 @param instance_name: the name of the target instance
1740 @param device_path: path of the physical block device, on the node
1741 @param idx: the disk index
1742 @return: absolute path to the disk's symlink
1743
1744 """
1745 link_name = _GetBlockDevSymlinkPath(instance_name, idx)
1746 try:
1747 os.symlink(device_path, link_name)
1748 except OSError, err:
1749 if err.errno == errno.EEXIST:
1750 if (not os.path.islink(link_name) or
1751 os.readlink(link_name) != device_path):
1752 os.remove(link_name)
1753 os.symlink(device_path, link_name)
1754 else:
1755 raise
1756
1757 return link_name
1758
1759
def _RemoveBlockDevLinks(instance_name, disks):
  """Remove the block device symlinks belonging to the given instance.

  """
  # only the number of disks matters for computing the link names
  for idx in range(len(disks)):
    link_name = _GetBlockDevSymlinkPath(instance_name, idx)
    if not os.path.islink(link_name):
      continue
    try:
      os.remove(link_name)
    except OSError:
      # removal is best-effort; log and carry on with the other links
      logging.exception("Can't remove symlink '%s'", link_name)
1771
1772
def _CalculateDeviceURI(instance, disk, device):
  """Get the URI for the device.

  @type instance: L{objects.Instance}
  @param instance: the instance which disk belongs to
  @type disk: L{objects.Disk}
  @param disk: the target disk object
  @type device: L{bdev.BlockDev}
  @param device: the corresponding BlockDevice
  @rtype: string
  @return: the device uri if any else None

  """
  access_mode = disk.params.get(constants.LDP_ACCESS,
                                constants.DISK_KERNELSPACE)
  if access_mode != constants.DISK_USERSPACE:
    # kernelspace access needs no URI
    return None

  # This can raise errors.BlockDeviceError
  return device.GetUserspaceAccessUri(instance.hypervisor)
1793
1794
1795 def _GatherAndLinkBlockDevs(instance):
1796 """Set up an instance's block device(s).
1797
1798 This is run on the primary node at instance startup. The block
1799 devices must be already assembled.
1800
1801 @type instance: L{objects.Instance}
1802 @param instance: the instance whose disks we should assemble
1803 @rtype: list
1804 @return: list of (disk_object, link_name, drive_uri)
1805
1806 """
1807 block_devices = []
1808 for idx, disk in enumerate(instance.disks_info):
1809 device = _RecursiveFindBD(disk)
1810 if device is None:
1811 raise errors.BlockDeviceError("Block device '%s' is not set up." %
1812 str(disk))
1813 device.Open()
1814 try:
1815 link_name = _SymlinkBlockDev(instance.name, device.dev_path, idx)
1816 except OSError, e:
1817 raise errors.BlockDeviceError("Cannot create block device symlink: %s" %
1818 e.strerror)
1819 uri = _CalculateDeviceURI(instance, disk, device)
1820
1821 block_devices.append((disk, link_name, uri))
1822
1823 return block_devices
1824
1825
def _IsInstanceUserDown(instance_info):
  """Check whether the instance info reports a user-initiated shutdown.

  """
  # short-circuit semantics deliberately mirror the original expression
  return (instance_info and
          "state" in instance_info and
          hv_base.HvInstanceState.IsShutdown(instance_info["state"]))
1830
1831
def _GetInstanceInfo(instance):
  """Helper for L{GetInstanceInfo}: unpacks the name, hypervisor and
  hvparams from an L{objects.Instance} object and forwards the call.

  """
  return GetInstanceInfo(instance.name, instance.hypervisor,
                         hvparams=instance.hvparams)
1836
1837
def StartInstance(instance, startup_paused, reason, store_reason=True):
  """Start an instance.

  @type instance: L{objects.Instance}
  @param instance: the instance object
  @type startup_paused: bool
  @param startup_paused: pause instance at startup?
  @type reason: list of reasons
  @param reason: the reason trail for this startup
  @type store_reason: boolean
  @param store_reason: whether to store the shutdown reason trail on file
  @rtype: None

  """
  instance_info = _GetInstanceInfo(instance)

  # nothing to do when the instance is already up, unless it was shut
  # down from inside the guest (user-down)
  if instance_info and not _IsInstanceUserDown(instance_info):
    logging.info("Instance '%s' already running, not starting", instance.name)
    return

  try:
    block_devices = _GatherAndLinkBlockDevs(instance)
    hyper = hypervisor.GetHypervisor(instance.hypervisor)
    hyper.StartInstance(instance, block_devices, startup_paused)
    if store_reason:
      _StoreInstReasonTrail(instance.name, reason)
  except errors.BlockDeviceError, err:
    _Fail("Block device error: %s", err, exc=True)
  except errors.HypervisorError, err:
    # the hypervisor failed after the disk symlinks were created, so
    # clean them up again before reporting the failure
    _RemoveBlockDevLinks(instance.name, instance.disks_info)
    _Fail("Hypervisor error: %s", err, exc=True)
1869
1870
def InstanceShutdown(instance, timeout, reason, store_reason=True):
  """Shut an instance down.

  @note: this functions uses polling with a hardcoded timeout.

  @type instance: L{objects.Instance}
  @param instance: the instance object
  @type timeout: integer
  @param timeout: maximum timeout for soft shutdown
  @type reason: list of reasons
  @param reason: the reason trail for this shutdown
  @type store_reason: boolean
  @param store_reason: whether to store the shutdown reason trail on file
  @rtype: None

  """
  hyper = hypervisor.GetHypervisor(instance.hypervisor)

  if not _GetInstanceInfo(instance):
    logging.info("Instance '%s' not running, doing nothing", instance.name)
    return

  # Callable passed to utils.Retry below: each invocation asks the
  # hypervisor to stop the instance and raises RetryAgain while the
  # instance is still reported as running.
  class _TryShutdown(object):
    def __init__(self):
      # whether StopInstance was already attempted at least once; handed
      # to the hypervisor as the 'retry' flag on subsequent attempts
      self.tried_once = False

    def __call__(self):
      # instance no longer reported: the shutdown succeeded
      if not _GetInstanceInfo(instance):
        return

      try:
        hyper.StopInstance(instance, retry=self.tried_once, timeout=timeout)
        if store_reason:
          _StoreInstReasonTrail(instance.name, reason)
      except errors.HypervisorError, err:
        # if the instance is no longer existing, consider this a
        # success and go to cleanup
        if not _GetInstanceInfo(instance):
          return

        _Fail("Failed to stop instance '%s': %s", instance.name, err)

      self.tried_once = True

      raise utils.RetryAgain()

  try:
    utils.Retry(_TryShutdown(), 5, timeout)
  except utils.RetryTimeout:
    # the shutdown did not succeed
    logging.error("Shutdown of '%s' unsuccessful, forcing", instance.name)

    try:
      hyper.StopInstance(instance, force=True)
    except errors.HypervisorError, err:
      # only raise an error if the instance still exists, otherwise
      # the error could simply be "instance ... unknown"!
      if _GetInstanceInfo(instance):
        _Fail("Failed to force stop instance '%s': %s", instance.name, err)

    # give the forced stop a moment to take effect before re-checking
    time.sleep(1)

    if _GetInstanceInfo(instance):
      _Fail("Could not shutdown instance '%s' even by destroy", instance.name)

  # post-shutdown cleanup is best-effort: a failure is only logged
  try:
    hyper.CleanupInstance(instance.name)
  except errors.HypervisorError, err:
    logging.warning("Failed to execute post-shutdown cleanup step: %s", err)

  _RemoveBlockDevLinks(instance.name, instance.disks_info)
1942
1943
def InstanceReboot(instance, reboot_type, shutdown_timeout, reason):
  """Reboot an instance.

  @type instance: L{objects.Instance}
  @param instance: the instance object to reboot
  @type reboot_type: str
  @param reboot_type: the type of reboot; must be one of
    L{constants.INSTANCE_REBOOT_SOFT} (reboot only the guest OS, VM
    left in place) or L{constants.INSTANCE_REBOOT_HARD} (tear down and
    restart the VM at the hypervisor level);
    L{constants.INSTANCE_REBOOT_FULL} is not accepted here since that
    mode is handled in cmdlib as a full stop/start instead of a
    call_instance_reboot RPC
  @type shutdown_timeout: integer
  @param shutdown_timeout: maximum timeout for soft shutdown
  @type reason: list of reasons
  @param reason: the reason trail for this reboot
  @rtype: None
  @raise RPCFail: if the instance is not running or the reboot fails

  """
  # Note: unlike 'StartInstance'/'InstanceShutdown', which simply return
  # on a no-op, rebooting a stopped instance is reported as an error here
  if not _GetInstanceInfo(instance):
    _Fail("Cannot reboot instance '%s' that is not running", instance.name)

  hyper = hypervisor.GetHypervisor(instance.hypervisor)

  if reboot_type == constants.INSTANCE_REBOOT_SOFT:
    # Soft reboot: the hypervisor restarts the guest OS in place
    try:
      hyper.RebootInstance(instance)
    except errors.HypervisorError as err:
      _Fail("Failed to soft reboot instance '%s': %s", instance.name, err)
  elif reboot_type == constants.INSTANCE_REBOOT_HARD:
    # Hard reboot: stop + start; the reason trail is stored exactly once
    # here, so the two sub-operations are told not to store it themselves
    try:
      InstanceShutdown(instance, shutdown_timeout, reason, store_reason=False)
      result = StartInstance(instance, False, reason, store_reason=False)
      _StoreInstReasonTrail(instance.name, reason)
      return result
    except errors.HypervisorError as err:
      _Fail("Failed to hard reboot instance '%s': %s", instance.name, err)
  else:
    _Fail("Invalid reboot_type received: '%s'", reboot_type)
1989
1990
def InstanceBalloonMemory(instance, memory):
  """Resize an instance's memory.

  @type instance: L{objects.Instance}
  @param instance: the instance object
  @type memory: int
  @param memory: new memory amount in MB
  @rtype: None
  @raise RPCFail: if the ballooning operation fails

  """
  hyper = hypervisor.GetHypervisor(instance.hypervisor)
  # Only instances currently known to the hypervisor can be ballooned
  if instance.name not in hyper.ListInstances(hvparams=instance.hvparams):
    logging.info("Instance %s is not running, cannot balloon", instance.name)
    return
  try:
    hyper.BalloonInstanceMemory(instance, memory)
  except errors.HypervisorError as err:
    _Fail("Failed to balloon instance memory: %s", err, exc=True)
2010
2011
def MigrationInfo(instance):
  """Gather information about an instance to be migrated.

  @type instance: L{objects.Instance}
  @param instance: the instance definition
  @return: hypervisor-specific migration data (opaque to the caller)
  @raise RPCFail: if the hypervisor cannot provide the information

  """
  hyper = hypervisor.GetHypervisor(instance.hypervisor)
  try:
    return hyper.MigrationInfo(instance)
  except errors.HypervisorError as err:
    _Fail("Failed to fetch migration information: %s", err, exc=True)
2025
2026
def AcceptInstance(instance, info, target):
  """Prepare the node to accept an instance.

  @type instance: L{objects.Instance}
  @param instance: the instance definition
  @type info: string/data (opaque)
  @param info: migration information, from the source node
  @type target: string
  @param target: target host (usually ip), on this node
  @raise RPCFail: if the preparation fails

  """
  # For externally mirrored templates the disks are not assembled on this
  # node, so the symlinks have to be created up front.
  # TODO: why is this required only for DTS_EXT_MIRROR?
  uses_ext_mirror = instance.disk_template in constants.DTS_EXT_MIRROR
  if uses_ext_mirror:
    try:
      _GatherAndLinkBlockDevs(instance)
    except errors.BlockDeviceError as err:
      _Fail("Block device error: %s", err, exc=True)

  hyper = hypervisor.GetHypervisor(instance.hypervisor)
  try:
    hyper.AcceptInstance(instance, info, target)
  except errors.HypervisorError as err:
    # Undo the symlinks created above before reporting the failure
    if uses_ext_mirror:
      _RemoveBlockDevLinks(instance.name, instance.disks_info)
    _Fail("Failed to accept instance: %s", err, exc=True)
2054
2055
def FinalizeMigrationDst(instance, info, success):
  """Finalize any preparation to accept an instance.

  @type instance: L{objects.Instance}
  @param instance: the instance definition
  @type info: string/data (opaque)
  @param info: migration information, from the source node
  @type success: boolean
  @param success: whether the migration was a success or a failure
  @raise RPCFail: if the finalization fails

  """
  try:
    hypervisor.GetHypervisor(instance.hypervisor).FinalizeMigrationDst(
      instance, info, success)
  except errors.HypervisorError as err:
    _Fail("Failed to finalize migration on the target node: %s", err, exc=True)
2072
2073
def MigrateInstance(cluster_name, instance, target, live):
  """Migrates an instance to another node.

  @type cluster_name: string
  @param cluster_name: name of the cluster
  @type instance: L{objects.Instance}
  @param instance: the instance definition
  @type target: string
  @param target: the target node name
  @type live: boolean
  @param live: whether the migration should be done live or not (the
    interpretation of this parameter is left to the hypervisor)
  @raise RPCFail: if migration fails for some reason

  """
  try:
    hypervisor.GetHypervisor(instance.hypervisor).MigrateInstance(
      cluster_name, instance, target, live)
  except errors.HypervisorError as err:
    _Fail("Failed to migrate instance: %s", err, exc=True)
2095
2096
def FinalizeMigrationSource(instance, success, live):
  """Finalize the instance migration on the source node.

  @type instance: L{objects.Instance}
  @param instance: the instance definition of the migrated instance
  @type success: bool
  @param success: whether the migration succeeded or not
  @type live: bool
  @param live: whether the user requested a live migration or not
  @raise RPCFail: If the execution fails for some reason

  """
  try:
    hypervisor.GetHypervisor(instance.hypervisor).FinalizeMigrationSource(
      instance, success, live)
  except Exception as err:  # pylint: disable=W0703
    _Fail("Failed to finalize the migration on the source node: %s", err,
          exc=True)
2116
2117
def GetMigrationStatus(instance):
  """Get the migration status

  @type instance: L{objects.Instance}
  @param instance: the instance that is being migrated
  @rtype: L{objects.MigrationStatus}
  @return: the status of the current migration (one of
    L{constants.HV_MIGRATION_VALID_STATUSES}), plus any additional
    progress info that can be retrieved from the hypervisor
  @raise RPCFail: If the migration status cannot be retrieved

  """
  try:
    return (hypervisor.GetHypervisor(instance.hypervisor)
            .GetMigrationStatus(instance))
  except Exception as err:  # pylint: disable=W0703
    _Fail("Failed to get migration status: %s", err, exc=True)
2135
2136
2137 def HotplugDevice(instance, action, dev_type, device, extra, seq):
2138 """Hotplug a device
2139
2140 Hotplug is currently supported only for KVM Hypervisor.
2141 @type instance: L{objects.Instance}
2142 @param instance: the instance to which we hotplug a device
2143 @type action: string
2144 @param action: the hotplug action to perform
2145 @type dev_type: string
2146 @param dev_type: the device type to hotplug
2147 @type device: either L{objects.NIC} or L{objects.Disk}
2148 @param device: the device object to hotplug
2149 @type extra: tuple
2150 @param extra: extra info used for disk hotplug (disk link, drive uri)
2151 @type seq: int
2152 @param seq: the index of the device from master perspective
2153 @raise RPCFail: in case instance does not have KVM hypervisor
2154
2155 """
2156 hyper = hypervisor.GetHypervisor(instance.hypervisor)
2157 try:
2158 hyper.VerifyHotplugSupport(instance, action, dev_type)
2159 except errors.HotplugError, err:
2160 _Fail("Hotplug is not supported: %s", err)
2161
2162 if action == constants.HOTPLUG_ACTION_ADD:
2163 fn = hyper.HotAddDevice
2164 elif action == constants.HOTPLUG_ACTION_REMOVE:
2165 fn = hyper.HotDelDevice
2166 elif action == constants.HOTPLUG_ACTION_MODIFY:
2167 fn = hyper.HotModDevice
2168 else:
2169 assert action in constants.HOTPLUG_ALL_ACTIONS
2170
2171 return fn(instance, dev_type, device, extra, seq)
2172
2173
def HotplugSupported(instance):
  """Checks if hotplug is generally supported.

  @type instance: L{objects.Instance}
  @param instance: the instance to check
  @raise RPCFail: if the instance's hypervisor does not support hotplug

  """
  try:
    hypervisor.GetHypervisor(instance.hypervisor).HotplugSupported(instance)
  except errors.HotplugError as err:
    _Fail("Hotplug is not supported: %s", err)
2183
2184
def ModifyInstanceMetadata(metadata):
  """Sends instance data to the metadata daemon.

  Uses the Luxi transport layer to communicate with the metadata
  daemon configuration server. It starts the metadata daemon if it is
  not running.
  The daemon must be enabled at configuration time.

  @type metadata: dict
  @param metadata: instance metadata obtained by calling
    L{objects.Instance.ToDict} on an instance object

  """
  if not constants.ENABLE_METAD:
    raise errors.ProgrammerError("The metadata deamon is disabled, yet"
                                 " ModifyInstanceMetadata has been called")

  # Best-effort daemon startup before attempting to connect
  if not utils.IsDaemonAlive(constants.METAD):
    result = utils.RunCmd([pathutils.DAEMON_UTIL, "start", constants.METAD])
    if result.failed:
      raise errors.HypervisorError("Failed to start metadata daemon")

  def _Connect():
    # Connection attempt, retried by utils.Retry below
    return transport.Transport(pathutils.SOCKET_DIR + "/ganeti-metad",
                               allow_non_master=True)

  # Number of immediate re-attempts of the whole utils.Retry cycle on
  # socket/NoMasterError failures; the waiting itself is done inside
  # utils.Retry (interval 1.0s, up to LUXI_DEF_CTMO per cycle)
  retries = 5

  while True:
    try:
      trans = utils.Retry(_Connect, 1.0, constants.LUXI_DEF_CTMO)
      break
    except utils.RetryTimeout:
      # NOTE(review): TimeoutError is not a Python 2 builtin; presumably
      # it is imported/defined elsewhere in this module -- verify
      raise TimeoutError("Connection to metadata daemon timed out")
    except (socket.error, NoMasterError), err:
      if retries == 0:
        logging.error("Failed to connect to the metadata daemon",
                      exc_info=True)
        raise TimeoutError("Failed to connect to metadata daemon: %s" % err)
      else:
        retries -= 1

  # Private fields are preserved through the dedicated encoder
  data = serializer.DumpJson(metadata,
                             private_encoder=serializer.EncodeWithPrivateFields)

  trans.Send(data)
  trans.Close()
2232
2233
def BlockdevCreate(disk, size, owner, on_primary, info, excl_stor):
  """Creates a block device for an instance.

  @type disk: L{objects.Disk}
  @param disk: the object describing the disk we should create
  @type size: int
  @param size: the size of the physical underlying device, in MiB
  @type owner: str
  @param owner: the name of the instance for which disk is created,
    used for device cache data
  @type on_primary: boolean
  @param on_primary: indicates if it is the primary node or not
  @type info: string
  @param info: string that will be sent to the physical device
    creation, used for example to set (LVM) tags on LVs
  @type excl_stor: boolean
  @param excl_stor: Whether exclusive_storage is active

  @return: the new unique_id of the device (this can sometime be
    computed only after creation), or None. On secondary nodes,
    it's not required to return anything.

  """
  # TODO: remove the obsolete "size" argument
  # pylint: disable=W0613
  clist = []
  if disk.children:
    # Assemble (and, when needed, open) all children first, since the
    # device itself may require them for its own creation
    for child in disk.children:
      try:
        crdev = _RecursiveAssembleBD(child, owner, on_primary)
      except errors.BlockDeviceError, err:
        _Fail("Can't assemble device %s: %s", child, err)
      if on_primary or disk.AssembleOnSecondary():
        # we need the children open in case the device itself has to
        # be assembled
        try:
          # pylint: disable=E1103
          crdev.Open()
        except errors.BlockDeviceError, err:
          _Fail("Can't make child '%s' read-write: %s", child, err)
      clist.append(crdev)

  try:
    device = bdev.Create(disk, clist, excl_stor)
  except errors.BlockDeviceError, err:
    _Fail("Can't create block device: %s", err)

  if on_primary or disk.AssembleOnSecondary():
    try:
      device.Assemble()
    except errors.BlockDeviceError, err:
      _Fail("Can't assemble device after creation, unusual event: %s", err)
    if on_primary or disk.OpenOnSecondary():
      try:
        device.Open(force=True)
      except errors.BlockDeviceError, err:
        _Fail("Can't make device r/w after creation, unusual event: %s", err)
    # Record the freshly assembled device in the cache for later lookups
    DevCacheManager.UpdateCache(device.dev_path, owner,
                                on_primary, disk.iv_name)

  device.SetInfo(info)

  return device.unique_id
2297
2298
def _DumpDevice(source_path, target_path, offset, size, truncate):
  """This function images/wipes the device using a local file.

  @type source_path: string
  @param source_path: path of the image or data source (e.g., "/dev/zero")
  @type target_path: string
  @param target_path: path of the device to image/wipe
  @type offset: int
  @param offset: offset in MiB in the output file
  @type size: int
  @param size: maximum size in MiB to write (data source might be smaller)
  @type truncate: bool
  @param truncate: whether the file should be truncated
  @return: None
  @raise RPCFail: in case of failure

  """
  # "dd" is invoked with a 1 MiB block size: internal Ganeti sizes are
  # always in MiB, so offset and size can be passed through unchanged
  mebibyte = 1024 * 1024

  args = [
    constants.DD_CMD,
    "if=%s" % source_path,
    "seek=%d" % offset,
    "bs=%s" % mebibyte,
    "oflag=direct",
    "of=%s" % target_path,
    "count=%d" % size,
    ]

  if not truncate:
    args.append("conv=notrunc")

  result = utils.RunCmd(args)

  if result.failed:
    _Fail("Dump command '%s' exited with error: %s; output: %s", result.cmd,
          result.fail_reason, result.output)
2338
2339
def _DownloadAndDumpDevice(source_url, target_path, size):
  """This function images a device using a downloaded image file.

  @type source_url: string
  @param source_url: URL of image to dump to disk
  @type target_path: string
  @param target_path: path of the device to image
  @type size: int
  @param size: maximum size in MiB to write (data source might be smaller)
  @rtype: NoneType
  @return: None
  @raise RPCFail: in case of download or write failures

  """
  class DDParams(object):
    # Tracks how much has been written and whether the image overflowed
    # the device
    def __init__(self, current_size, total_size):
      self.current_size = current_size
      self.total_size = total_size
      self.image_size_error = False

  def dd_write(ddparams, out):
    if ddparams.current_size < ddparams.total_size:
      ddparams.current_size += len(out)
      target_file.write(out)
    else:
      ddparams.image_size_error = True
      # Returning a value != len(out) makes pycurl abort the transfer
      return -1

  target_file = open(target_path, "r+")
  # The file handle was previously leaked when curl.perform() raised an
  # error other than the image-size abort; the try/finally guarantees it
  # is closed on every path
  try:
    ddparams = DDParams(0, 1024 * 1024 * size)

    curl = pycurl.Curl()
    curl.setopt(pycurl.VERBOSE, True)
    curl.setopt(pycurl.NOSIGNAL, True)
    curl.setopt(pycurl.USERAGENT, http.HTTP_GANETI_VERSION)
    curl.setopt(pycurl.URL, source_url)
    curl.setopt(pycurl.WRITEFUNCTION, lambda out: dd_write(ddparams, out))

    try:
      curl.perform()
    except pycurl.error:
      if ddparams.image_size_error:
        _Fail("Disk image larger than the disk")
      else:
        raise
  finally:
    target_file.close()
2390
2391
def BlockdevWipe(disk, offset, size):
  """Wipes a block device.

  @type disk: L{objects.Disk}
  @param disk: the disk object we want to wipe
  @type offset: int
  @param offset: The offset in MiB in the file
  @type size: int
  @param size: The size in MiB to write
  @raise RPCFail: if the device is not found or the requested range
    does not fit into it

  """
  try:
    device = _RecursiveFindBD(disk)
  except errors.BlockDeviceError:
    device = None

  # Sanity-check the target device and the requested range
  if not device:
    _Fail("Cannot wipe device %s: device not found", disk.iv_name)
  if offset < 0:
    _Fail("Negative offset")
  if size < 0:
    _Fail("Negative size")
  if offset > device.size:
    _Fail("Wipe offset is bigger than device size")
  if (offset + size) > device.size:
    _Fail("Wipe offset and size are bigger than device size")

  # Zero-fill the requested range from /dev/zero
  _DumpDevice("/dev/zero", device.dev_path, offset, size, True)
2420
2421
def BlockdevImage(disk, image, size):
  """Images a block device either by dumping a local file or
  downloading a URL.

  @type disk: L{objects.Disk}
  @param disk: the disk object we want to image
  @type image: string
  @param image: URL or local file path of the disk image to be dumped
  @type size: int
  @param size: The size in MiB to write
  @rtype: NoneType
  @return: None
  @raise RPCFail: in case of failure

  """
  is_url = utils.IsUrl(image)
  if not (is_url or os.path.exists(image)):
    _Fail("Image '%s' not found", image)

  try:
    device = _RecursiveFindBD(disk)
  except errors.BlockDeviceError:
    device = None

  # Sanity-check the target device and the requested size
  if not device:
    _Fail("Cannot image device %s: device not found", disk.iv_name)
  if size < 0:
    _Fail("Negative size")
  if size > device.size:
    _Fail("Image size is bigger than device size")

  if is_url:
    _DownloadAndDumpDevice(image, device.dev_path, size)
  else:
    _DumpDevice(image, device.dev_path, 0, size, False)
2459
2460
def BlockdevPauseResumeSync(disks, pause):
  """Pause or resume the sync of the block device.

  @type disks: list of L{objects.Disk}
  @param disks: the disks object we want to pause/resume
  @type pause: bool
  @param pause: whether to pause or resume
  @rtype: list of tuples
  @return: one (success, message) tuple per disk; message is C{None}
    on success

  """
  results = []
  for disk in disks:
    try:
      rdev = _RecursiveFindBD(disk)
    except errors.BlockDeviceError:
      rdev = None

    if not rdev:
      results.append((False,
                      "Cannot change sync for device %s:"
                      " device not found" % disk.iv_name))
      continue

    changed = rdev.PauseResumeSync(pause)

    if changed:
      results.append((changed, None))
    else:
      verb = "Pause" if pause else "Resume"
      results.append((changed,
                      "%s for device %s failed" % (verb, disk.iv_name)))

  return results
2494
2495
def BlockdevRemove(disk):
  """Remove a block device.

  @note: This is intended to be called recursively.

  @type disk: L{objects.Disk}
  @param disk: the disk object we should remove
  @rtype: boolean
  @return: the success of the operation
  @raise RPCFail: if the device or any of its children cannot be removed

  """
  msgs = []
  try:
    rdev = _RecursiveFindBD(disk)
  except errors.BlockDeviceError, err:
    # probably can't attach
    logging.info("Can't attach to device %s in remove", disk)
    rdev = None
  if rdev is not None:
    r_path = rdev.dev_path

    def _TryRemove():
      # Returns an empty list on success or the error message otherwise;
      # retried below because removal can fail transiently
      try:
        rdev.Remove()
        return []
      except errors.BlockDeviceError, err:
        return [str(err)]

    msgs.extend(utils.SimpleRetry([], _TryRemove,
                                  constants.DISK_REMOVE_RETRY_INTERVAL,
                                  constants.DISK_REMOVE_RETRY_TIMEOUT))

    # Drop the device from the cache only if the removal fully succeeded
    if not msgs:
      DevCacheManager.RemoveCache(r_path)

  if disk.children:
    # Recurse into children, collecting (not aborting on) their errors
    for child in disk.children:
      try:
        BlockdevRemove(child)
      except RPCFail, err:
        msgs.append(str(err))

  if msgs:
    _Fail("; ".join(msgs))
2540
2541
def _RecursiveAssembleBD(disk, owner, as_primary):
  """Activate a block device for an instance.

  This is run on the primary and secondary nodes for an instance.

  @note: this function is called recursively.

  @type disk: L{objects.Disk}
  @param disk: the disk we try to assemble
  @type owner: str
  @param owner: the name of the instance which owns the disk
  @type as_primary: boolean
  @param as_primary: if we should make the block device
    read/write

  @return: the assembled device or None (in case no device
    was assembled)
  @raise errors.BlockDeviceError: in case there is an error
    during the activation of the children or the device
    itself

  """
  children = []
  if disk.children:
    # Compute how many children may fail to activate: with
    # ChildrenNeeded() == -1 no failures are tolerated, otherwise up to
    # len(children) - needed children may be missing (appended as None)
    mcn = disk.ChildrenNeeded()
    if mcn == -1:
      mcn = 0 # max number of Nones allowed
    else:
      mcn = len(disk.children) - mcn # max number of Nones
    for chld_disk in disk.children:
      try:
        cdev = _RecursiveAssembleBD(chld_disk, owner, as_primary)
      except errors.BlockDeviceError, err:
        # Tolerate the failure unless the None budget is already used up
        if children.count(None) >= mcn:
          raise
        cdev = None
        logging.error("Error in child activation (but continuing): %s",
                      str(err))
      children.append(cdev)

  if as_primary or disk.AssembleOnSecondary():
    r_dev = bdev.Assemble(disk, children)
    result = r_dev
    if as_primary or disk.OpenOnSecondary():
      r_dev.Open()
    # Register the assembled device for later fast lookups
    DevCacheManager.UpdateCache(r_dev.dev_path, owner,
                                as_primary, disk.iv_name)

  else:
    # Nothing to assemble on this node; report success
    result = True
  return result
2593
2594
def BlockdevAssemble(disk, instance, as_primary, idx):
  """Activate a block device for an instance.

  This is a wrapper over _RecursiveAssembleBD.

  @type disk: L{objects.Disk}
  @param disk: the disk to assemble
  @type instance: L{objects.Instance}
  @param instance: the instance owning the disk
  @type as_primary: boolean
  @param as_primary: whether to open the device read/write and create
    the symlink
  @type idx: int
  @param idx: the index of the disk, used for the symlink name
  @return: the tuple C{(dev_path, link_name, uri)} when a device was
    assembled (C{link_name} and C{uri} are C{None} unless assembling as
    primary), or C{(True, True)} on secondary nodes where nothing
    needed assembling
  @raise RPCFail: in case of errors while assembling or symlinking

  """
  try:
    result = _RecursiveAssembleBD(disk, instance.name, as_primary)
    if isinstance(result, BlockDev):
      # pylint: disable=E1103
      dev_path = result.dev_path
      link_name = None
      uri = None
      if as_primary:
        link_name = _SymlinkBlockDev(instance.name, dev_path, idx)
        uri = _CalculateDeviceURI(instance, disk, result)
    elif result:
      # Secondary node without anything to assemble
      return result, result
    else:
      _Fail("Unexpected result from _RecursiveAssembleBD")
  except errors.BlockDeviceError, err:
    _Fail("Error while assembling disk: %s", err, exc=True)
  except OSError, err:
    _Fail("Error while symlinking disk: %s", err, exc=True)

  return dev_path, link_name, uri
2625
2626
def BlockdevShutdown(disk):
  """Shut down a block device.

  First, if the device is assembled (Attach() is successful), then
  the device is shutdown. Then the children of the device are
  shutdown.

  This function is called recursively. Note that we don't cache the
  children or such, as oppossed to assemble, shutdown of different
  devices doesn't require that the upper device was active.

  @type disk: L{objects.Disk}
  @param disk: the description of the disk we should
    shutdown
  @rtype: None
  @raise RPCFail: if the device or any of its children fail to shut down

  """
  failures = []

  device = _RecursiveFindBD(disk)
  if device is not None:
    dev_path = device.dev_path
    try:
      device.Shutdown()
      DevCacheManager.RemoveCache(dev_path)
    except errors.BlockDeviceError as err:
      failures.append(str(err))

  # Shut down the children as well, collecting all error messages
  for child in (disk.children or []):
    try:
      BlockdevShutdown(child)
    except RPCFail as err:
      failures.append(str(err))

  if failures:
    _Fail("; ".join(failures))
2663
2664
def BlockdevAddchildren(parent_cdev, new_cdevs):
  """Extend a mirrored block device.

  @type parent_cdev: L{objects.Disk}
  @param parent_cdev: the disk to which we should add children
  @type new_cdevs: list of L{objects.Disk}
  @param new_cdevs: the list of children which we should add
  @rtype: None
  @raise RPCFail: if the parent or any new child cannot be found

  """
  parent = _RecursiveFindBD(parent_cdev)
  if parent is None:
    _Fail("Can't find parent device '%s' in add children", parent_cdev)
  children = [_RecursiveFindBD(disk) for disk in new_cdevs]
  if None in children:
    _Fail("Can't find new device(s) to add: %s:%s", children, new_cdevs)
  parent.AddChildren(children)
2682
2683
def BlockdevRemovechildren(parent_cdev, new_cdevs):
  """Shrink a mirrored block device.

  @type parent_cdev: L{objects.Disk}
  @param parent_cdev: the disk from which we should remove children
  @type new_cdevs: list of L{objects.Disk}
  @param new_cdevs: the list of children which we should remove
  @rtype: None
  @raise RPCFail: if the parent or a child device cannot be resolved

  """
  parent = _RecursiveFindBD(parent_cdev)
  if parent is None:
    _Fail("Can't find parent device '%s' in remove children", parent_cdev)

  paths = []
  for disk in new_cdevs:
    static_path = disk.StaticDevPath()
    if static_path is None:
      # No static path: look the device up to obtain its path
      found = _RecursiveFindBD(disk)
      if found is None:
        _Fail("Can't find device %s while removing children", disk)
      paths.append(found.dev_path)
    else:
      if not utils.IsNormAbsPath(static_path):
        _Fail("Strange path returned from StaticDevPath: '%s'", static_path)
      paths.append(static_path)

  parent.RemoveChildren(paths)
2711
2712
def BlockdevGetmirrorstatus(disks):
  """Get the mirroring status of a list of devices.

  @type disks: list of L{objects.Disk}
  @param disks: the list of disks which we should query
  @rtype: disk
  @return: List of L{objects.BlockDevStatus}, one for each disk
  @raise RPCFail: if any of the disks cannot be found

  """
  result = []
  for disk in disks:
    found = _RecursiveFindBD(disk)
    if found is None:
      _Fail("Can't find device %s", disk)
    result.append(found.CombinedSyncStatus())
  return result
2733
2734
def BlockdevGetmirrorstatusMulti(disks):
  """Get the mirroring status of a list of devices.

  @type disks: list of L{objects.Disk}
  @param disks: the list of disks which we should query
  @rtype: disk
  @return: List of tuples, (bool, status), one for each disk; bool denotes
    success/failure, status is L{objects.BlockDevStatus} on success, string
    otherwise

  """
  result = []
  for disk in disks:
    try:
      found = _RecursiveFindBD(disk)
      if found is None:
        result.append((False, "Can't find device %s" % disk))
        continue
      result.append((True, found.CombinedSyncStatus()))
    except errors.BlockDeviceError as err:
      logging.exception("Error while getting disk status")
      result.append((False, str(err)))

  assert len(disks) == len(result)

  return result
2764
2765
def _RecursiveFindBD(disk):
  """Check if a device is activated.

  If so, return information about the real device.

  @type disk: L{objects.Disk}
  @param disk: the disk object we need to find
  @return: None if the device can't be found,
    otherwise the device instance

  """
  # Resolve all children first, since FindDevice needs them
  if disk.children:
    children = [_RecursiveFindBD(child) for child in disk.children]
  else:
    children = []

  return bdev.FindDevice(disk, children)
2784
2785
def _OpenRealBD(disk):
  """Opens the underlying block device of a disk.

  @type disk: L{objects.Disk}
  @param disk: the disk object we want to open
  @return: the opened device instance
  @raise RPCFail: if the block device is not set up

  """
  device = _RecursiveFindBD(disk)
  if device is None:
    _Fail("Block device '%s' is not set up", disk)

  device.Open()
  return device
2800
2801
def BlockdevFind(disk):
  """Check if a device is activated.

  If it is, return information about the real device.

  @type disk: L{objects.Disk}
  @param disk: the disk to find
  @rtype: None or objects.BlockDevStatus
  @return: None if the disk cannot be found, otherwise a the current
    information

  """
  try:
    found = _RecursiveFindBD(disk)
  except errors.BlockDeviceError as err:
    _Fail("Failed to find device: %s", err, exc=True)

  if found is None:
    return None

  return found.GetSyncStatus()
2823
2824
def BlockdevGetdimensions(disks):
  """Computes the size of the given disks.

  If a disk is not found, returns None instead.

  @type disks: list of L{objects.Disk}
  @param disks: the list of disk to compute the size for
  @rtype: list
  @return: list with elements None if the disk cannot be found,
    otherwise the pair (size, spindles), where spindles is None if the
    device doesn't support that

  """
  dimensions = []
  for disk in disks:
    try:
      found = _RecursiveFindBD(disk)
    except errors.BlockDeviceError:
      found = None
    # Missing devices are reported as None rather than failing the call
    dimensions.append(None if found is None else found.GetActualDimensions())
  return dimensions
2850
2851
def UploadFile(file_name, data, mode, uid, gid, atime, mtime):
  """Write a file to the filesystem.

  This allows the master to overwrite(!) a file. It will only perform
  the operation if the file belongs to a list of configuration files.

  @type file_name: str
  @param file_name: the target file name
  @type data: str
  @param data: the new contents of the file (possibly compressed)
  @type mode: int
  @param mode: the mode to give the file (can be None)
  @type uid: string
  @param uid: the owner of the file
  @type gid: string
  @param gid: the group of the file
  @type atime: float
  @param atime: the atime to set on the file (can be None)
  @type mtime: float
  @param mtime: the mtime to set on the file (can be None)
  @rtype: None
  @raise RPCFail: if the file name is invalid or not whitelisted

  """
  file_name = vcluster.LocalizeVirtualPath(file_name)

  if not os.path.isabs(file_name):
    _Fail("Filename passed to UploadFile is not absolute: '%s'", file_name)

  # Only a fixed whitelist of configuration files may be overwritten
  if file_name not in _ALLOWED_UPLOAD_FILES:
    _Fail("Filename passed to UploadFile not in allowed upload targets: '%s'",
          file_name)

  contents = _Decompress(data)

  if not (isinstance(uid, basestring) and isinstance(gid, basestring)):
    _Fail("Invalid username/groupname type")

  # Resolve the symbolic user/group names into numeric ids
  getents = runtime.GetEnts()
  utils.SafeWriteFile(file_name, None,
                      data=contents, mode=mode,
                      uid=getents.LookupUser(uid),
                      gid=getents.LookupGroup(gid),
                      atime=atime, mtime=mtime)
2896
2897
def RunOob(oob_program, command, node, timeout):
  """Executes oob_program with given command on given node.

  @param oob_program: The path to the executable oob_program
  @param command: The command to invoke on oob_program
  @param node: The node given as an argument to the program
  @param timeout: Timeout after which we kill the oob program
  @return: stdout
  @raise RPCFail: If execution fails for some reason

  """
  cmd = [oob_program, command, node]
  result = utils.RunCmd(cmd, timeout=timeout)

  if result.failed:
    _Fail("'%s' failed with reason '%s'; output: %s", result.cmd,
          result.fail_reason, result.output)

  return result.stdout
2917
2918
2919 def _OSOndiskAPIVersion(os_dir):
2920 """Compute and return the API version of a given OS.
2921
2922 This function will try to read the API version of the OS residing in
2923 the 'os_dir' directory.
2924
2925 @type os_dir: str
2926 @param os_dir: the directory in which we should look for the OS
2927 @rtype: tuple
2928 @return: tuple (status, data) with status denoting the validity and
2929 data holding either the valid versions or an error message
2930
2931 """
2932 api_file = utils.PathJoin(os_dir, constants.OS_API_FILE)
2933
2934 try:
2935 st = os.stat(api_file)
2936 except EnvironmentError, err:
2937 return False, ("Required file '%s' not found under path %s: %s" %
2938 (constants.OS_API_FILE, os_dir, utils.ErrnoOrStr(err)))
2939
2940 if not stat.S_ISREG(stat.S_IFMT(st.st_mode)):
2941 return False, ("File '%s' in %s is not a regular file" %
2942 (constants.OS_API_FILE, os_dir))
2943
2944 try:
2945 api_versions = utils.ReadFile(api_file).splitlines()
2946 except EnvironmentError, err:
2947 return False, ("Error while reading the API version file at %s: %s" %
2948 (api_file, utils.ErrnoOrStr(err)))
2949
2950 try:
2951 api_versions = [int(version.strip()) for version in api_versions]
2952 except (TypeError, ValueError), err:
2953 return False, ("API version(s) can't be converted to integer: %s" %
2954 str(err))
2955
2956 return True, api_versions
2957
2958
2959 def DiagnoseOS(top_dirs=None):
2960 """Compute the validity for all OSes.
2961
2962 @type top_dirs: list
2963 @param top_dirs: the list of directories in which to
2964 search (if not given defaults to
2965 L{pathutils.OS_SEARCH_PATH})
2966 @rtype: list of L{objects.OS}
2967 @return: a list of tuples (name, path, status, diagnose, variants,
2968 parameters, api_version) for all (potential) OSes under all
2969 search paths, where:
2970 - name is the (potential) OS name
2971 - path is the full path to the OS
2972 - status True/False is the validity of the OS
2973 - diagnose is the error message for an invalid OS, otherwise empty
2974 - variants is a list of supported OS variants, if any
2975 - parameters is a list of (name, help) parameters, if any
2976 - api_version is a list of support OS API versions
2977
2978 """
2979 if top_dirs is None:
2980 top_dirs = pathutils.OS_SEARCH_PATH
2981
2982 result = []
2983 for dir_name in top_dirs:
2984 if os.path.isdir(dir_name):
2985 try:
2986 f_names = utils.ListVisibleFiles(dir_name)
2987 except EnvironmentError, err:
2988 logging.exception("Can't list the OS directory %s: %s", dir_name, err)
2989 break
2990 for name in f_names:
2991 os_path = utils.PathJoin(dir_name, name)
2992 status, os_inst = _TryOSFromDisk(name, base_dir=dir_name)
2993 if status:
2994 diagnose = ""
2995 variants = os_inst.supported_variants
2996 parameters = os_inst.supported_parameters
2997 api_versions = os_inst.api_versions
2998 trusted = False if os_inst.create_script_untrusted else True
2999 else:
3000 diagnose = os_inst
3001 variants = parameters = api_versions = []
3002 trusted = True
3003 result.append((name, os_path, status, diagnose, variants,
3004 parameters, api_versions, trusted))
3005
3006 return result
3007
3008
def _TryOSFromDisk(name, base_dir=None):
  """Create an OS instance from disk.

  This function will return an OS instance if the given name is a
  valid OS name.

  @type name: str
  @param name: the OS name to look for
  @type base_dir: string
  @keyword base_dir: Base directory containing OS installations.
      Defaults to a search in all the OS_SEARCH_PATH dirs.
  @rtype: tuple
  @return: success and either the OS instance if we find a valid one,
      or error message

  """
  # locate the OS directory, either under the given base dir or by
  # scanning the whole search path
  if base_dir is None:
    os_dir = utils.FindFile(name, pathutils.OS_SEARCH_PATH, os.path.isdir)
  else:
    os_dir = utils.FindFile(name, [base_dir], os.path.isdir)

  if os_dir is None:
    return False, "Directory for OS %s not found in search path" % name

  status, api_versions = _OSOndiskAPIVersion(os_dir)
  if not status:
    # push the error up
    return status, api_versions

  # the OS must support at least one API version this node knows about
  if not constants.OS_API_VERSIONS.intersection(api_versions):
    return False, ("API version mismatch for path '%s': found %s, want %s." %
                   (os_dir, api_versions, constants.OS_API_VERSIONS))

  # OS Files dictionary, we will populate it with the absolute path
  # names; if the value is True, then it is a required file, otherwise
  # an optional one
  os_files = dict.fromkeys(constants.OS_SCRIPTS, True)

  # the two create-script flavors are individually optional; a check
  # further down ensures that at least one of them exists
  os_files[constants.OS_SCRIPT_CREATE] = False
  os_files[constants.OS_SCRIPT_CREATE_UNTRUSTED] = False

  if max(api_versions) >= constants.OS_API_V15:
    os_files[constants.OS_VARIANTS_FILE] = False

  if max(api_versions) >= constants.OS_API_V20:
    os_files[constants.OS_PARAMETERS_FILE] = True
  else:
    # the verify script only exists from API v20 onwards
    del os_files[constants.OS_SCRIPT_VERIFY]

  # replace each required/optional flag with the file's absolute path;
  # iterating over items() (a list copy in Python 2) keeps the in-loop
  # "del os_files[filename]" safe
  for (filename, required) in os_files.items():
    os_files[filename] = utils.PathJoin(os_dir, filename)

    try:
      st = os.stat(os_files[filename])
    except EnvironmentError, err:
      if err.errno == errno.ENOENT and not required:
        # optional file is simply absent: drop it and move on
        del os_files[filename]
        continue
      return False, ("File '%s' under path '%s' is missing (%s)" %
                     (filename, os_dir, utils.ErrnoOrStr(err)))

    if not stat.S_ISREG(stat.S_IFMT(st.st_mode)):
      return False, ("File '%s' under path '%s' is not a regular file" %
                     (filename, os_dir))

    if filename in constants.OS_SCRIPTS:
      # scripts must be executable at least by their owner
      if stat.S_IMODE(st.st_mode) & stat.S_IXUSR != stat.S_IXUSR:
        return False, ("File '%s' under path '%s' is not executable" %
                       (filename, os_dir))

  if not constants.OS_SCRIPT_CREATE in os_files and \
      not constants.OS_SCRIPT_CREATE_UNTRUSTED in os_files:
    return False, ("A create script (trusted or untrusted) under path '%s'"
                   " must exist" % os_dir)

  create_script = os_files.get(constants.OS_SCRIPT_CREATE, None)
  create_script_untrusted = os_files.get(constants.OS_SCRIPT_CREATE_UNTRUSTED,
                                         None)

  variants = []
  if constants.OS_VARIANTS_FILE in os_files:
    variants_file = os_files[constants.OS_VARIANTS_FILE]
    try:
      variants = \
        utils.FilterEmptyLinesAndComments(utils.ReadFile(variants_file))
    except EnvironmentError, err:
      # we accept missing files, but not other errors
      if err.errno != errno.ENOENT:
        return False, ("Error while reading the OS variants file at %s: %s" %
                       (variants_file, utils.ErrnoOrStr(err)))

  parameters = []
  if constants.OS_PARAMETERS_FILE in os_files:
    parameters_file = os_files[constants.OS_PARAMETERS_FILE]
    try:
      parameters = utils.ReadFile(parameters_file).splitlines()
    except EnvironmentError, err:
      return False, ("Error while reading the OS parameters file at %s: %s" %
                     (parameters_file, utils.ErrnoOrStr(err)))
    # each parameter line is "<name> <free-form help text>"
    parameters = [v.split(None, 1) for v in parameters]

  os_obj = objects.OS(name=name, path=os_dir,
                      create_script=create_script,
                      create_script_untrusted=create_script_untrusted,
                      export_script=os_files[constants.OS_SCRIPT_EXPORT],
                      import_script=os_files[constants.OS_SCRIPT_IMPORT],
                      rename_script=os_files[constants.OS_SCRIPT_RENAME],
                      verify_script=os_files.get(constants.OS_SCRIPT_VERIFY,
                                                 None),
                      supported_variants=variants,
                      supported_parameters=parameters,
                      api_versions=api_versions)
  return True, os_obj
3120
3121
def OSFromDisk(name, base_dir=None):
  """Create an OS instance from disk.

  This function will return an OS instance if the given name is a
  valid OS name. Otherwise, it will raise an appropriate
  L{RPCFail} exception, detailing why this is not a valid OS.

  This is just a wrapper over L{_TryOSFromDisk}, which doesn't raise
  an exception but returns true/false status data.

  @type base_dir: string
  @keyword base_dir: Base directory containing OS installations.
      Defaults to a search in all the OS_SEARCH_PATH dirs.
  @rtype: L{objects.OS}
  @return: the OS instance if we find a valid one
  @raise RPCFail: if we don't find a valid OS

  """
  # strip any "+variant" suffix before the on-disk lookup
  status, payload = _TryOSFromDisk(objects.OS.GetName(name), base_dir)

  if status:
    return payload

  # on failure the payload is the error message
  _Fail(payload)
3147
3148
def OSCoreEnv(os_name, inst_os, os_params, debug=0):
  """Calculate the basic environment for an os script.

  @type os_name: str
  @param os_name: full operating system name (including variant)
  @type inst_os: L{objects.OS}
  @param inst_os: operating system for which the environment is being built
  @type os_params: dict
  @param os_params: the OS parameters
  @type debug: integer
  @param debug: debug level (0 or 1, for OS Api 10)
  @rtype: dict
  @return: dict of environment variables
  @raise errors.BlockDeviceError: if the block device
      cannot be found

  """
  api_version = \
    max(constants.OS_API_VERSIONS.intersection(inst_os.api_versions))

  env = {
    "OS_API_VERSION": "%d" % api_version,
    "OS_NAME": inst_os.name,
    "DEBUG_LEVEL": "%d" % debug,
    }

  # OS variants: fall back to the first supported variant when none is
  # encoded in the OS name
  variant = ""
  if api_version >= constants.OS_API_V15 and inst_os.supported_variants:
    variant = objects.OS.GetVariant(os_name) or inst_os.supported_variants[0]
  env["OS_VARIANT"] = variant

  # OS params
  for pname, pvalue in os_params.items():
    env["OSP_%s" % pname.upper().replace("-", "_")] = pvalue

  # Set a default path otherwise programs called by OS scripts (or
  # even hooks called from OS scripts) might break, and we don't want
  # to have each script require setting a PATH variable
  env["PATH"] = constants.HOOKS_PATH

  return env
3192
3193
def OSEnvironment(instance, inst_os, debug=0):
  """Calculate the environment for an os script.

  @type instance: L{objects.Instance}
  @param instance: target instance for the os script run
  @type inst_os: L{objects.OS}
  @param inst_os: operating system for which the environment is being built
  @type debug: integer
  @param debug: debug level (0 or 1, for OS Api 10)
  @rtype: dict
  @return: dict of environment variables
  @raise errors.BlockDeviceError: if the block device
      cannot be found

  """
  # merge public and (unwrapped) private OS parameters for the scripts
  os_params = objects.FillDict(instance.osparams,
                               instance.osparams_private.Unprivate())
  env = OSCoreEnv(instance.os, inst_os, os_params, debug=debug)

  for attr in ["name", "os", "uuid", "ctime", "mtime", "primary_node"]:
    env["INSTANCE_%s" % attr.upper()] = str(getattr(instance, attr))

  env["HYPERVISOR"] = instance.hypervisor
  env["DISK_COUNT"] = "%d" % len(instance.disks_info)
  env["NIC_COUNT"] = "%d" % len(instance.nics)
  env["INSTANCE_SECONDARY_NODES"] = \
    ("%s" % " ".join(instance.secondary_nodes))

  # Disks
  for idx, disk in enumerate(instance.disks_info):
    real_disk = _OpenRealBD(disk)
    env["DISK_%d_PATH" % idx] = real_disk.dev_path
    env["DISK_%d_ACCESS" % idx] = disk.mode
    env["DISK_%d_UUID" % idx] = disk.uuid
    if disk.name:
      env["DISK_%d_NAME" % idx] = disk.name
    if constants.HV_DISK_TYPE in instance.hvparams:
      env["DISK_%d_FRONTEND_TYPE" % idx] = \
        instance.hvparams[constants.HV_DISK_TYPE]
    if disk.dev_type in constants.DTS_BLOCK:
      env["DISK_%d_BACKEND_TYPE" % idx] = "block"
    elif disk.dev_type in constants.DTS_FILEBASED:
      env["DISK_%d_BACKEND_TYPE" % idx] = \
        "file:%s" % disk.logical_id[0]

  # NICs
  for idx, nic in enumerate(instance.nics):
    env["NIC_%d_MAC" % idx] = nic.mac
    env["NIC_%d_UUID" % idx] = nic.uuid
    if nic.name:
      env["NIC_%d_NAME" % idx] = nic.name
    if nic.ip:
      env["NIC_%d_IP" % idx] = nic.ip
    env["NIC_%d_MODE" % idx] = nic.nicparams[constants.NIC_MODE]
    if nic.nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
      env["NIC_%d_BRIDGE" % idx] = nic.nicparams[constants.NIC_LINK]
    if nic.nicparams[constants.NIC_LINK]:
      env["NIC_%d_LINK" % idx] = nic.nicparams[constants.NIC_LINK]
    if nic.netinfo:
      # expose network information via the standard hooks dict
      nobj = objects.Network.FromDict(nic.netinfo)
      env.update(nobj.HooksDict("NIC_%d_" % idx))
    if constants.HV_NIC_TYPE in instance.hvparams:
      env["NIC_%d_FRONTEND_TYPE" % idx] = \
        instance.hvparams[constants.HV_NIC_TYPE]

  # HV/BE params
  for source, kind in [(instance.beparams, "BE"), (instance.hvparams, "HV")]:
    for key, value in source.items():
      env["INSTANCE_%s_%s" % (kind, key)] = str(value)

  return env
3264
3265
3266 def DiagnoseExtStorage(top_dirs=None):
3267 """Compute the validity for all ExtStorage Providers.
3268
3269 @type top_dirs: list
3270 @param top_dirs: the list of directories in which to
3271 search (if not given defaults to
3272 L{pathutils.ES_SEARCH_PATH})
3273 @rtype: list of L{objects.ExtStorage}
3274 @return: a list of tuples (name, path, status, diagnose, parameters)
3275 for all (potential) ExtStorage Providers under all
3276 search paths, where:
3277 - name is the (potential) ExtStorage Provider
3278 - path is the full path to the ExtStorage Provider
3279 - status True/False is the validity of the ExtStorage Provider
3280 - diagnose is the error message for an invalid ExtStorage Provider,
3281 otherwise empty
3282 - parameters is a list of (name, help) parameters, if any
3283
3284 """
3285 if top_dirs is None:
3286 top_dirs = pathutils.ES_SEARCH_PATH
3287
3288 result = []
3289 for dir_name in top_dirs:
3290 if os.path.isdir(dir_name):
3291 try:
3292 f_names = utils.ListVisibleFiles(dir_name)
3293 except EnvironmentError, err:
3294 logging.exception("Can't list the ExtStorage directory %s: %s",
3295 dir_name, err)
3296 break
3297 for name in f_names:
3298 es_path = utils.PathJoin(dir_name, name)
3299 status, es_inst = bdev.ExtStorageFromDisk(name, base_dir=dir_name)
3300 if status:
3301 diagnose = ""
3302 parameters = es_inst.supported_parameters
3303 else:
3304 diagnose = es_inst
3305 parameters = []
3306 result.append((name, es_path, status, diagnose, parameters))
3307
3308 return result
3309
3310
3311 def BlockdevGrow(disk, amount, dryrun, backingstore, excl_stor):
3312 """Grow a stack of block devices.
3313
3314 This function is called recursively, with the childrens being the
3315 first ones to resize.
3316
3317 @type disk: L{objects.Disk}
3318 @param disk: the disk to be grown
3319 @type amount: integer
3320 @param amount: the amount (in mebibytes) to grow with
3321 @type dryrun: boolean
3322 @param dryrun: whether to execute the operation in simulation mode
3323 only, without actually increasing the size
3324 @param backingstore: whether to execute the operation on backing storage
3325 only, or on "logical" storage only; e.g. DRBD is logical storage,
3326 whereas LVM, file, RBD are backing storage
3327 @rtype: (status, result)
3328 @type excl_stor: boolean
3329 @param excl_stor: Whether exclusive_storage is active
3330 @return: a tuple with the status of the operation (True/False), and
3331 the errors message if status is False
3332
3333 """
3334 r_dev = _RecursiveFindBD(disk)
3335 if r_dev is None:
3336 _Fail("Cannot find block device %s", disk)
3337
3338 try:
3339 r_dev.Grow(amount, dryrun, backingstore, excl_stor)
3340 except errors.BlockDeviceError, err:
3341 _Fail("Failed to grow block device: %s", err, exc=True)
3342
3343
def BlockdevSnapshot(disk):
  """Create a snapshot copy of a block device.

  This function is called recursively, and the snapshot is actually created
  just for the leaf lvm backend device.

  @type disk: L{objects.Disk}
  @param disk: the disk to be snapshotted
  @rtype: string
  @return: snapshot disk ID as (vg, lv)

  """
  if disk.dev_type == constants.DT_DRBD8:
    # descend into the DRBD device's backing storage
    if not disk.children:
      _Fail("DRBD device '%s' without backing storage cannot be snapshotted",
            disk.unique_id)
    return BlockdevSnapshot(disk.children[0])

  if disk.dev_type != constants.DT_PLAIN:
    _Fail("Cannot snapshot non-lvm block device '%s' of type '%s'",
          disk.logical_id, disk.dev_type)

  r_dev = _RecursiveFindBD(disk)
  if r_dev is None:
    _Fail("Cannot find block device %s", disk)

  # FIXME: choose a saner value for the snapshot size
  # let's stay on the safe side and ask for the full size, for now
  return r_dev.Snapshot(disk.size)
3372
3373
3374 def BlockdevSetInfo(disk, info):
3375 """Sets 'metadata' information on block devices.
3376
3377 This function sets 'info' metadata on block devices. Initial
3378 information is set at device creation; this function should be used
3379 for example after renames.
3380
3381 @type disk: L{objects.Disk}
3382 @param disk: the disk to be grown
3383 @type info: string
3384 @param info: new 'info' metadata
3385 @rtype: (status, result)
3386 @return: a tuple with the status of the operation (True/False), and
3387 the errors message if status is False
3388
3389 """
3390 r_dev = _RecursiveFindBD(disk)
3391 if r_dev is None:
3392 _Fail("Cannot find block device %s", disk)
3393
3394 try:
3395 r_dev.SetInfo(info)
3396 except errors.BlockDeviceError