Merge branch 'stable-2.10' into stable-2.11
[ganeti-github.git] / lib / backend.py
1 #
2 #
3
4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
5 # All rights reserved.
6 #
7 # Redistribution and use in source and binary forms, with or without
8 # modification, are permitted provided that the following conditions are
9 # met:
10 #
11 # 1. Redistributions of source code must retain the above copyright notice,
12 # this list of conditions and the following disclaimer.
13 #
14 # 2. Redistributions in binary form must reproduce the above copyright
15 # notice, this list of conditions and the following disclaimer in the
16 # documentation and/or other materials provided with the distribution.
17 #
18 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
19 # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
20 # TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
21 # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
22 # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
23 # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
24 # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
25 # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
26 # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
27 # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28 # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
30
31 """Functions used by the node daemon
32
33 @var _ALLOWED_UPLOAD_FILES: denotes which files are accepted in
34 the L{UploadFile} function
35 @var _ALLOWED_CLEAN_DIRS: denotes which directories are accepted
36 in the L{_CleanDirectory} function
37
38 """
39
40 # pylint: disable=E1103,C0302
41
42 # E1103: %s %r has no %r member (but some types could not be
43 # inferred), because the _TryOSFromDisk returns either (True, os_obj)
44 # or (False, "string") which confuses pylint
45
46 # C0302: This module has become too big and should be split up
47
48
49 import os
50 import os.path
51 import shutil
52 import time
53 import stat
54 import errno
55 import re
56 import random
57 import logging
58 import tempfile
59 import zlib
60 import base64
61 import signal
62
63 from ganeti import errors
64 from ganeti import utils
65 from ganeti import ssh
66 from ganeti import hypervisor
67 from ganeti.hypervisor import hv_base
68 from ganeti import constants
69 from ganeti.storage import bdev
70 from ganeti.storage import drbd
71 from ganeti.storage import filestorage
72 from ganeti import objects
73 from ganeti import ssconf
74 from ganeti import serializer
75 from ganeti import netutils
76 from ganeti import runtime
77 from ganeti import compat
78 from ganeti import pathutils
79 from ganeti import vcluster
80 from ganeti import ht
81 from ganeti.storage.base import BlockDev
82 from ganeti.storage.drbd import DRBD8
83 from ganeti import hooksmaster
84
85
#: File regenerated by the kernel on every boot; its content identifies the
#: current boot and is used to detect node reboots
_BOOT_ID_PATH = "/proc/sys/kernel/random/boot_id"
#: Directories which may be wiped by L{_CleanDirectory}
_ALLOWED_CLEAN_DIRS = compat.UniqueFrozenset([
  pathutils.DATA_DIR,
  pathutils.JOB_QUEUE_ARCHIVE_DIR,
  pathutils.QUEUE_DIR,
  pathutils.CRYPTO_KEYS_DIR,
  ])
#: Maximum validity of generated SSL certificates, in seconds (7 days);
#: presumably applied when creating temporary X509 certificates — confirm
#: at the usage sites (not visible in this chunk)
_MAX_SSL_CERT_VALIDITY = 7 * 24 * 60 * 60
#: Basenames of the key/certificate files inside a temporary X509 directory
_X509_KEY_FILE = "key"
_X509_CERT_FILE = "cert"
#: Basenames of the status, PID and CA files used by import/export daemons
_IES_STATUS_FILE = "status"
_IES_PID_FILE = "pid"
_IES_CA_FILE = "ca"

#: Valid LVS output line regex
_LVSLINE_REGEX = re.compile(r"^ *([^|]+)\|([^|]+)\|([0-9.]+)\|([^|]{6,})\|?$")

# Actions for the master setup script (see L{_RunMasterSetupScript})
_MASTER_START = "start"
_MASTER_STOP = "stop"

#: Maximum file permissions for restricted command directory and executables
_RCMD_MAX_MODE = (stat.S_IRWXU |
                  stat.S_IRGRP | stat.S_IXGRP |
                  stat.S_IROTH | stat.S_IXOTH)

#: Delay before returning an error for restricted commands
_RCMD_INVALID_DELAY = 10

#: How long to wait to acquire lock for restricted commands (shorter than
#: L{_RCMD_INVALID_DELAY}) to reduce blockage of noded forks when many
#: command requests arrive
_RCMD_LOCK_TIMEOUT = _RCMD_INVALID_DELAY * 0.8
119
120
class RPCFail(Exception):
  """Class denoting RPC failure.

  Its argument is the error message.

  Raised (usually via L{_Fail}) by RPC endpoint implementations in this
  module; the node daemon handles it specially and turns it into a
  'failed' RPC result for the caller.

  """
127
128
def _GetInstReasonFilename(instance_name):
  """Path of the file containing the reason of the instance status change.

  @type instance_name: string
  @param instance_name: The name of the instance
  @rtype: string
  @return: The path of the file

  """
  # One reason file per instance, named after the instance itself
  reason_file = utils.PathJoin(pathutils.INSTANCE_REASON_DIR, instance_name)
  return reason_file
139
140
def _StoreInstReasonTrail(instance_name, trail):
  """Serialize a reason trail related to an instance change of state to file.

  The exact location of the file depends on the name of the instance and on
  the configuration of the Ganeti cluster defined at deploy time.

  @type instance_name: string
  @param instance_name: The name of the instance
  @type trail: list
  @param trail: reason trail to serialize; stored in the instance's reason
    file as JSON
  @rtype: None

  """
  json = serializer.DumpJson(trail)
  filename = _GetInstReasonFilename(instance_name)
  # Overwrites any previously stored trail for this instance
  utils.WriteFile(filename, data=json)
155
156
def _Fail(msg, *args, **kwargs):
  """Log an error and then raise an RPCFail exception.

  The exception is handled specially in the ganeti daemon and turned
  into a 'failed' return type, so this function is a useful shortcut
  for logging an error and returning it to the master daemon.

  @type msg: string
  @param msg: the text of the exception; lazily %-formatted with C{args}
  @keyword log: whether to log the error (defaults to True)
  @keyword exc: if True, log with traceback via C{logging.exception}
  @raise RPCFail

  """
  if args:
    msg = msg % args
  if kwargs.get("log", True):
    if kwargs.get("exc", False):
      logging.exception(msg)
    else:
      logging.error(msg)
  raise RPCFail(msg)
178
179
def _GetConfig():
  """Simple wrapper to return a SimpleStore.

  @rtype: L{ssconf.SimpleStore}
  @return: a SimpleStore instance

  """
  # A fresh instance per call; SimpleStore reads the ssconf files on demand
  store = ssconf.SimpleStore()
  return store
188
189
def _GetSshRunner(cluster_name):
  """Simple wrapper to return an SshRunner.

  @type cluster_name: str
  @param cluster_name: the cluster name, which is needed
    by the SshRunner constructor
  @rtype: L{ssh.SshRunner}
  @return: an SshRunner instance

  """
  runner = ssh.SshRunner(cluster_name)
  return runner
201
202
def _Decompress(data):
  """Unpack data compressed by the RPC client.

  @type data: list or tuple
  @param data: Data sent by RPC client
  @rtype: str
  @return: Decompressed data

  """
  assert isinstance(data, (list, tuple))
  assert len(data) == 2
  (encoding, content) = data
  # Only two encodings are supported: raw pass-through and zlib+base64
  if encoding == constants.RPC_ENCODING_NONE:
    return content
  if encoding == constants.RPC_ENCODING_ZLIB_BASE64:
    return zlib.decompress(base64.b64decode(content))
  raise AssertionError("Unknown data encoding")
221
222
def _CleanDirectory(path, exclude=None):
  """Remove all regular files in a directory.

  Symlinks and subdirectories are left untouched.

  @type path: str
  @param path: the directory to clean
  @type exclude: list
  @param exclude: list of files to be excluded, defaults
      to the empty list

  """
  # Refuse to clean anything outside the explicit whitelist
  if path not in _ALLOWED_CLEAN_DIRS:
    _Fail("Path passed to _CleanDirectory not in allowed clean targets: '%s'",
          path)

  if not os.path.isdir(path):
    return

  if exclude is None:
    excluded = []
  else:
    # Normalize excluded paths so comparison with joined paths works
    excluded = [os.path.normpath(entry) for entry in exclude]

  for rel_name in utils.ListVisibleFiles(path):
    full_name = utils.PathJoin(path, rel_name)
    if full_name in excluded:
      continue
    # Only remove regular files, never symlinks or directories
    if os.path.isfile(full_name) and not os.path.islink(full_name):
      utils.RemoveFile(full_name)
251
252
def _BuildUploadFileList():
  """Build the list of allowed upload files.

  This is abstracted so that it's built only once at module import time.

  """
  allowed_files = set()
  allowed_files.update([
    pathutils.CLUSTER_CONF_FILE,
    pathutils.ETC_HOSTS,
    pathutils.SSH_KNOWN_HOSTS_FILE,
    pathutils.VNC_PASSWORD_FILE,
    pathutils.RAPI_CERT_FILE,
    pathutils.SPICE_CERT_FILE,
    pathutils.SPICE_CACERT_FILE,
    pathutils.RAPI_USERS_FILE,
    pathutils.CONFD_HMAC_KEY,
    pathutils.CLUSTER_DOMAIN_SECRET_FILE,
    ])

  # Every hypervisor may declare additional ancillary files of its own
  for hv_name in constants.HYPER_TYPES:
    hv_class = hypervisor.GetHypervisorClass(hv_name)
    allowed_files.update(hv_class.GetAncillaryFiles()[0])

  assert pathutils.FILE_STORAGE_PATHS_FILE not in allowed_files, \
    "Allowed file storage paths should never be uploaded via RPC"

  return frozenset(allowed_files)
280
281
#: Files accepted by L{UploadFile}; computed once at module import time
_ALLOWED_UPLOAD_FILES = _BuildUploadFileList()
283
284
def JobQueuePurge():
  """Removes job queue files and archived jobs.

  @rtype: None

  """
  # Keep the queue lock file so the job queue lock stays valid after purging
  _CleanDirectory(pathutils.QUEUE_DIR, exclude=[pathutils.JOB_QUEUE_LOCK_FILE])
  _CleanDirectory(pathutils.JOB_QUEUE_ARCHIVE_DIR)
294
295
def GetMasterNodeName():
  """Returns the master node name.

  @rtype: string
  @return: name of the master node
  @raise RPCFail: in case of errors

  """
  try:
    return _GetConfig().GetMasterNode()
  except errors.ConfigurationError, err:
    # exc=True makes _Fail log the full traceback of the underlying error
    _Fail("Cluster configuration incomplete: %s", err, exc=True)
308
309
def RunLocalHooks(hook_opcode, hooks_path, env_builder_fn):
  """Decorator that runs hooks before and after the decorated function.

  @type hook_opcode: string
  @param hook_opcode: opcode of the hook
  @type hooks_path: string
  @param hooks_path: path of the hooks
  @type env_builder_fn: function
  @param env_builder_fn: function that returns a dictionary containing the
    environment variables for the hooks. Will get all the parameters of the
    decorated function.
  @raise RPCFail: in case of pre-hook failure

  """
  def decorator(fn):
    def wrapper(*args, **kwargs):
      _, myself = ssconf.GetMasterAndMyself()
      nodes = ([myself], [myself]) # these hooks run locally

      # Defer building the hook environment until the hooks actually run,
      # binding the decorated function's arguments now
      env_fn = compat.partial(env_builder_fn, *args, **kwargs)

      cfg = _GetConfig()
      hr = HooksRunner()
      hm = hooksmaster.HooksMaster(hook_opcode, hooks_path, nodes,
                                   hr.RunLocalHooks, None, env_fn, None,
                                   logging.warning, cfg.GetClusterName(),
                                   cfg.GetMasterNode())
      # A pre-hook failure aborts the call before fn runs (RPCFail raised
      # by the hooks master machinery)
      hm.RunPhase(constants.HOOKS_PHASE_PRE)
      result = fn(*args, **kwargs)
      hm.RunPhase(constants.HOOKS_PHASE_POST)

      return result
    return wrapper
  return decorator
344
345
def _BuildMasterIpEnv(master_params, use_external_mip_script=None):
  """Build environment variables for master IP hooks.

  @type master_params: L{objects.MasterNetworkParameters}
  @param master_params: network parameters of the master
  @type use_external_mip_script: boolean
  @param use_external_mip_script: whether to use an external master IP
    address setup script (unused, but necessary per the implementation of the
    _RunLocalHooks decorator)

  """
  # pylint: disable=W0613
  ip_version = \
    netutils.IPAddress.GetVersionFromAddressFamily(master_params.ip_family)
  return {
    "MASTER_NETDEV": master_params.netdev,
    "MASTER_IP": master_params.ip,
    "MASTER_NETMASK": str(master_params.netmask),
    "CLUSTER_IP_VERSION": str(ip_version),
    }
367
368
def _RunMasterSetupScript(master_params, action, use_external_mip_script):
  """Execute the master IP address setup script.

  @type master_params: L{objects.MasterNetworkParameters}
  @param master_params: network parameters of the master
  @type action: string
  @param action: action to pass to the script. Must be one of
    L{backend._MASTER_START} or L{backend._MASTER_STOP}
  @type use_external_mip_script: boolean
  @param use_external_mip_script: whether to use an external master IP
    address setup script
  @raise backend.RPCFail: if there are errors during the execution of the
    script

  """
  env = _BuildMasterIpEnv(master_params)

  if use_external_mip_script:
    setup_script = pathutils.EXTERNAL_MASTER_SETUP_SCRIPT
  else:
    setup_script = pathutils.DEFAULT_MASTER_SETUP_SCRIPT

  # reset_env=True gives the script only the environment built above
  result = utils.RunCmd([setup_script, action], env=env, reset_env=True)

  if result.failed:
    # Let _Fail do the formatting (consistent with the rest of this module);
    # logging is on by default, so the previous explicit log=True was redundant
    _Fail("Failed to %s the master IP. Script return value: %s, output: '%s'",
          action, result.exit_code, result.output)
396
397
@RunLocalHooks(constants.FAKE_OP_MASTER_TURNUP, "master-ip-turnup",
               _BuildMasterIpEnv)
def ActivateMasterIp(master_params, use_external_mip_script):
  """Activate the IP address of the master daemon.

  The L{RunLocalHooks} decorator runs the master-ip-turnup hooks locally
  around this call.

  @type master_params: L{objects.MasterNetworkParameters}
  @param master_params: network parameters of the master
  @type use_external_mip_script: boolean
  @param use_external_mip_script: whether to use an external master IP
    address setup script
  @raise RPCFail: in case of errors during the IP startup

  """
  _RunMasterSetupScript(master_params, _MASTER_START,
                        use_external_mip_script)
413
414
def StartMasterDaemons(no_voting):
  """Activate local node as master node.

  The function will start the master daemons (ganeti-masterd and ganeti-rapi).

  @type no_voting: boolean
  @param no_voting: whether to start ganeti-masterd without a node vote
      but still non-interactively
  @rtype: None
  @raise RPCFail: if daemon-util fails to start the master daemons

  """
  if no_voting:
    # Skip the voting protocol, but require the explicit confirmation flag
    masterd_args = "--no-voting --yes-do-it"
  else:
    masterd_args = ""

  env = {
    "EXTRA_MASTERD_ARGS": masterd_args,
    }

  result = utils.RunCmd([pathutils.DAEMON_UTIL, "start-master"], env=env)
  if result.failed:
    # _Fail logs the message itself; the previous separate logging.error
    # call caused the error to be logged twice
    _Fail("Can't start Ganeti master: %s", result.output)
441
442
@RunLocalHooks(constants.FAKE_OP_MASTER_TURNDOWN, "master-ip-turndown",
               _BuildMasterIpEnv)
def DeactivateMasterIp(master_params, use_external_mip_script):
  """Deactivate the master IP on this node.

  The L{RunLocalHooks} decorator runs the master-ip-turndown hooks locally
  around this call.

  @type master_params: L{objects.MasterNetworkParameters}
  @param master_params: network parameters of the master
  @type use_external_mip_script: boolean
  @param use_external_mip_script: whether to use an external master IP
    address setup script
  @raise RPCFail: in case of errors during the IP turndown

  """
  _RunMasterSetupScript(master_params, _MASTER_STOP,
                        use_external_mip_script)
458
459
def StopMasterDaemons():
  """Stop the master daemons on this node.

  Stop the master daemons (ganeti-masterd and ganeti-rapi) on this node.

  Failures are logged but do not raise; stopping is best effort.

  @rtype: None

  """
  # TODO: log and report back to the caller the error failures; we
  # need to decide in which case we fail the RPC for this

  result = utils.RunCmd([pathutils.DAEMON_UTIL, "stop-master"])
  if not result.failed:
    return
  logging.error("Could not stop Ganeti master, command %s had exitcode %s"
                " and error %s",
                result.cmd, result.exit_code, result.output)
476
477
def ChangeMasterNetmask(old_netmask, netmask, master_ip, master_netdev):
  """Change the netmask of the master IP.

  @param old_netmask: the old value of the netmask
  @param netmask: the new value of the netmask
  @param master_ip: the master IP
  @param master_netdev: the master network device
  @raise RPCFail: if the master IP is not configured locally or either
    C{ip} invocation fails

  """
  if old_netmask == netmask:
    # Nothing to change
    return

  if not netutils.IPAddress.Own(master_ip):
    _Fail("The master IP address is not up, not attempting to change its"
          " netmask")

  label = "%s:0" % master_netdev

  # First add the address under the new netmask, then drop the old one
  add_cmd = [constants.IP_COMMAND_PATH, "address", "add",
             "%s/%s" % (master_ip, netmask),
             "dev", master_netdev, "label", label]
  if utils.RunCmd(add_cmd).failed:
    _Fail("Could not set the new netmask on the master IP address")

  del_cmd = [constants.IP_COMMAND_PATH, "address", "del",
             "%s/%s" % (master_ip, old_netmask),
             "dev", master_netdev, "label", label]
  if utils.RunCmd(del_cmd).failed:
    _Fail("Could not bring down the master IP address with the old netmask")
507
508
def EtcHostsModify(mode, host, ip):
  """Modify a host entry in /etc/hosts.

  @param mode: The mode to operate. Either add or remove entry
  @param host: The host to operate on
  @param ip: The ip associated with the entry
  @raise RPCFail: if the mode/parameter combination is invalid or the
    mode is unknown

  """
  # Bug fix: the original code instantiated RPCFail(...) without raising it,
  # so invalid parameter combinations silently fell through to the utils
  # calls below; _Fail logs the error and raises RPCFail properly.
  if mode == constants.ETC_HOSTS_ADD:
    if not ip:
      _Fail("Mode 'add' needs 'ip' parameter, but parameter not"
            " present")
    utils.AddHostToEtcHosts(host, ip)
  elif mode == constants.ETC_HOSTS_REMOVE:
    if ip:
      _Fail("Mode 'remove' does not allow 'ip' parameter, but"
            " parameter is present")
    utils.RemoveHostFromEtcHosts(host)
  else:
    _Fail("Mode not supported")
529
530
def LeaveCluster(modify_ssh_setup):
  """Cleans up and remove the current node.

  This function cleans up and prepares the current node to be removed
  from the cluster.

  If processing is successful, then it raises an
  L{errors.QuitGanetiException} which is used as a special case to
  shutdown the node daemon.

  @type modify_ssh_setup: boolean
  @param modify_ssh_setup: whether to also remove the ssh keys of
    L{constants.SSH_LOGIN_USER}

  """
  _CleanDirectory(pathutils.DATA_DIR)
  _CleanDirectory(pathutils.CRYPTO_KEYS_DIR)
  JobQueuePurge()

  if modify_ssh_setup:
    try:
      priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.SSH_LOGIN_USER)

      utils.RemoveAuthorizedKey(auth_keys, utils.ReadFile(pub_key))

      utils.RemoveFile(priv_key)
      utils.RemoveFile(pub_key)
    except errors.OpExecError:
      # Best effort: log and continue with the remaining cleanup
      logging.exception("Error while processing ssh files")

  try:
    utils.RemoveFile(pathutils.CONFD_HMAC_KEY)
    utils.RemoveFile(pathutils.RAPI_CERT_FILE)
    utils.RemoveFile(pathutils.SPICE_CERT_FILE)
    utils.RemoveFile(pathutils.SPICE_CACERT_FILE)
    utils.RemoveFile(pathutils.NODED_CERT_FILE)
  except: # pylint: disable=W0702
    # Deliberately broad: removing secrets is best effort and must not keep
    # the node from leaving the cluster
    logging.exception("Error while removing cluster secrets")

  utils.StopDaemon(constants.CONFD)
  utils.StopDaemon(constants.MOND)
  utils.StopDaemon(constants.KVMD)

  # Raise a custom exception (handled in ganeti-noded)
  raise errors.QuitGanetiException(True, "Shutdown scheduled")
574
575
576 def _CheckStorageParams(params, num_params):
577 """Performs sanity checks for storage parameters.
578
579 @type params: list
580 @param params: list of storage parameters
581 @type num_params: int
582 @param num_params: expected number of parameters
583
584 """
585 if params is None:
586 raise errors.ProgrammerError("No storage parameters for storage"
587 " reporting is provided.")
588 if not isinstance(params, list):
589 raise errors.ProgrammerError("The storage parameters are not of type"
590 " list: '%s'" % params)
591 if not len(params) == num_params:
592 raise errors.ProgrammerError("Did not receive the expected number of"
593 "storage parameters: expected %s,"
594 " received '%s'" % (num_params, len(params)))
595
596
def _CheckLvmStorageParams(params):
  """Performs sanity check for the 'exclusive storage' flag.

  @see: C{_CheckStorageParams}
  @rtype: bool
  @return: the exclusive storage flag extracted from C{params}
  @raise errors.ProgrammerError: if the flag is not a boolean

  """
  _CheckStorageParams(params, 1)
  excl_stor = params[0]
  if not isinstance(excl_stor, bool):
    raise errors.ProgrammerError("Exclusive storage parameter is not"
                                 " boolean: '%s'." % excl_stor)
  return excl_stor
609
610
def _GetLvmVgSpaceInfo(name, params):
  """Wrapper around C{_GetVgInfo} which checks the storage parameters.

  @type name: string
  @param name: name of the volume group
  @type params: list
  @param params: list of storage parameters, which in this case should be
    containing only one for exclusive storage
  @rtype: dict
  @return: the storage unit description from L{_GetVgInfo}

  """
  excl_stor = _CheckLvmStorageParams(params)
  return _GetVgInfo(name, excl_stor)
623
624
def _GetVgInfo(
    name, excl_stor, info_fn=bdev.LogicalVolume.GetVGInfo):
  """Retrieves information about a LVM volume group.

  @type name: string
  @param name: name of the volume group
  @type excl_stor: bool
  @param excl_stor: exclusive storage flag, passed through to C{info_fn}
  @rtype: dict
  @return: storage unit description with keys "type", "name",
    "storage_free" and "storage_size"; the sizes are C{None} when no
    information was returned for the VG

  """
  # TODO: GetVGInfo supports returning information for multiple VGs at once
  vginfo = info_fn([name], excl_stor)
  if vginfo:
    (vg_free, vg_size) = [int(round(val, 0)) for val in vginfo[0][:2]]
  else:
    vg_free = vg_size = None

  return {
    "type": constants.ST_LVM_VG,
    "name": name,
    "storage_free": vg_free,
    "storage_size": vg_size,
    }
645
646
def _GetLvmPvSpaceInfo(name, params):
  """Wrapper around C{_GetVgSpindlesInfo} with sanity checks.

  @type name: string
  @param name: name of the volume group
  @type params: list
  @param params: list containing only the exclusive storage flag
  @see: C{_GetLvmVgSpaceInfo}

  """
  excl_stor = _CheckLvmStorageParams(params)
  return _GetVgSpindlesInfo(name, excl_stor)
655
656
def _GetVgSpindlesInfo(
    name, excl_stor, info_fn=bdev.LogicalVolume.GetVgSpindlesInfo):
  """Retrieves information about spindles in an LVM volume group.

  @type name: string
  @param name: VG name
  @type excl_stor: bool
  @param excl_stor: exclusive storage
  @rtype: dict
  @return: dictionary whose keys are "type", "name", "storage_free" and
    "storage_size" for the storage type, VG name, free spindles and total
    spindles respectively (the old docstring wrongly documented the keys as
    "name"/"vg_free"/"vg_size"); both counts are zero when exclusive storage
    is disabled

  """
  if excl_stor:
    (vg_free, vg_size) = info_fn(name)
  else:
    # Spindle counts are only collected with exclusive storage enabled
    vg_free = 0
    vg_size = 0
  return {
    "type": constants.ST_LVM_PV,
    "name": name,
    "storage_free": vg_free,
    "storage_size": vg_size,
    }
681
682
def _GetHvInfo(name, hvparams, get_hv_fn=hypervisor.GetHypervisor):
  """Retrieves node information from a hypervisor.

  The information returned depends on the hypervisor. Common items:

    - vg_size is the size of the configured volume group in MiB
    - vg_free is the free size of the volume group in MiB
    - memory_dom0 is the memory allocated for domain0 in MiB
    - memory_free is the currently available (free) ram in MiB
    - memory_total is the total number of ram in MiB
    - hv_version: the hypervisor version, if available

  @type hvparams: dict of string
  @param hvparams: the hypervisor's hvparams

  """
  hv = get_hv_fn(name)
  return hv.GetNodeInfo(hvparams=hvparams)
700
701
def _GetHvInfoAll(hv_specs, get_hv_fn=hypervisor.GetHypervisor):
  """Retrieves node information for all hypervisors.

  See C{_GetHvInfo} for information on the output.

  @type hv_specs: list of pairs (string, dict of strings)
  @param hv_specs: list of pairs of a hypervisor's name and its hvparams

  """
  if hv_specs is None:
    return None
  # One info dict per requested hypervisor, in the order given
  return [_GetHvInfo(hvname, hvparams, get_hv_fn)
          for (hvname, hvparams) in hv_specs]
718
719
720 def _GetNamedNodeInfo(names, fn):
721 """Calls C{fn} for all names in C{names} and returns a dictionary.
722
723 @rtype: None or dict
724
725 """
726 if names is None:
727 return None
728 else:
729 return map(fn, names)
730
731
def GetNodeInfo(storage_units, hv_specs):
  """Gives back a hash with different information about the node.

  @type storage_units: list of tuples (string, string, list)
  @param storage_units: List of tuples (storage unit, identifier, parameters) to
    ask for disk space information. In case of lvm-vg, the identifier is
    the VG name. The parameters can contain additional, storage-type-specific
    parameters, for example exclusive storage for lvm storage.
  @type hv_specs: list of pairs (string, dict of strings)
  @param hv_specs: list of pairs of a hypervisor's name and its hvparams
  @rtype: tuple; (string, None/dict, None/dict)
  @return: Tuple containing boot ID, volume group information and hypervisor
    information

  """
  # The boot ID changes on every reboot, allowing callers to detect restarts
  bootid = utils.ReadFile(_BOOT_ID_PATH, size=128).rstrip("\n")
  # Dispatch each storage unit to its type-specific reporting function
  storage_info = _GetNamedNodeInfo(
    storage_units,
    (lambda (storage_type, storage_key, storage_params):
        _ApplyStorageInfoFunction(storage_type, storage_key, storage_params)))
  hv_info = _GetHvInfoAll(hv_specs)
  return (bootid, storage_info, hv_info)
754
755
def _GetFileStorageSpaceInfo(path, params):
  """Wrapper around filestorage.GetSpaceInfo.

  The purpose of this wrapper is to call filestorage.GetFileStorageSpaceInfo
  and ignore the *args parameter to not leak it into the filestorage
  module's code.

  @see: C{filestorage.GetFileStorageSpaceInfo} for description of the
    parameters.

  """
  # File storage takes no extra storage parameters
  _CheckStorageParams(params, 0)
  return filestorage.GetFileStorageSpaceInfo(path)
769
770
# FIXME: implement storage reporting for all missing storage types.
#: Dispatch table mapping each storage type to its space-reporting function;
#: C{None} marks types without an implementation yet (L{_ApplyStorageInfoFunction}
#: raises NotImplementedError for those)
_STORAGE_TYPE_INFO_FN = {
  constants.ST_BLOCK: None,
  constants.ST_DISKLESS: None,
  constants.ST_EXT: None,
  constants.ST_FILE: _GetFileStorageSpaceInfo,
  constants.ST_LVM_PV: _GetLvmPvSpaceInfo,
  constants.ST_LVM_VG: _GetLvmVgSpaceInfo,
  constants.ST_SHARED_FILE: None,
  constants.ST_RADOS: None,
  }
782
783
def _ApplyStorageInfoFunction(storage_type, storage_key, *args):
  """Looks up and applies the correct function to calculate free and total
  storage for the given storage type.

  @type storage_type: string
  @param storage_type: the storage type for which the storage shall be reported.
  @type storage_key: string
  @param storage_key: identifier of a storage unit, e.g. the volume group name
    of an LVM storage unit
  @type args: any
  @param args: various parameters that can be used for storage reporting. These
    parameters and their semantics vary from storage type to storage type and
    are just propagated in this function.
  @return: the results of the application of the storage space function (see
    _STORAGE_TYPE_INFO_FN) if storage space reporting is implemented for that
    storage type
  @raises NotImplementedError: for storage types who don't support space
    reporting yet

  """
  fn = _STORAGE_TYPE_INFO_FN[storage_type]
  if fn is None:
    # No reporting function registered for this storage type yet
    raise NotImplementedError
  return fn(storage_key, *args)
808
809
810 def _CheckExclusivePvs(pvi_list):
811 """Check that PVs are not shared among LVs
812
813 @type pvi_list: list of L{objects.LvmPvInfo} objects
814 @param pvi_list: information about the PVs
815
816 @rtype: list of tuples (string, list of strings)
817 @return: offending volumes, as tuples: (pv_name, [lv1_name, lv2_name...])
818
819 """
820 res = []
821 for pvi in pvi_list:
822 if len(pvi.lv_list) > 1:
823 res.append((pvi.name, pvi.lv_list))
824 return res
825
826
def _VerifyHypervisors(what, vm_capable, result, all_hvparams,
                       get_hv_fn=hypervisor.GetHypervisor):
  """Verifies the hypervisor. Appends the results to the 'results' list.

  @type what: C{dict}
  @param what: a dictionary of things to check
  @type vm_capable: boolean
  @param vm_capable: whether or not this node is vm capable
  @type result: dict
  @param result: dictionary of verification results; results of the
    verifications in this function will be added here
  @type all_hvparams: dict of dict of string
  @param all_hvparams: dictionary mapping hypervisor names to hvparams
  @type get_hv_fn: function
  @param get_hv_fn: function to retrieve the hypervisor, to improve testability

  """
  if not vm_capable:
    return

  if constants.NV_HYPERVISOR in what:
    result[constants.NV_HYPERVISOR] = {}
    for hv_name in what[constants.NV_HYPERVISOR]:
      hvparams = all_hvparams[hv_name]
      try:
        val = get_hv_fn(hv_name).Verify(hvparams=hvparams)
      except errors.HypervisorError, err:
        # Report the verification failure as the per-hypervisor result
        # instead of aborting the whole node verification
        val = "Error while checking hypervisor: %s" % str(err)
      result[constants.NV_HYPERVISOR][hv_name] = val
856
857
def _VerifyHvparams(what, vm_capable, result,
                    get_hv_fn=hypervisor.GetHypervisor):
  """Verifies the hvparams. Appends the results to the 'results' list.

  @type what: C{dict}
  @param what: a dictionary of things to check
  @type vm_capable: boolean
  @param vm_capable: whether or not this node is vm capable
  @type result: dict
  @param result: dictionary of verification results; results of the
    verifications in this function will be added here
  @type get_hv_fn: function
  @param get_hv_fn: function to retrieve the hypervisor, to improve testability

  """
  if not vm_capable:
    return

  if constants.NV_HVPARAMS in what:
    # Only failed validations are collected; an empty list means success
    result[constants.NV_HVPARAMS] = []
    for source, hv_name, hvparms in what[constants.NV_HVPARAMS]:
      try:
        logging.info("Validating hv %s, %s", hv_name, hvparms)
        get_hv_fn(hv_name).ValidateParameters(hvparms)
      except errors.HypervisorError, err:
        result[constants.NV_HVPARAMS].append((source, hv_name, str(err)))
884
885
def _VerifyInstanceList(what, vm_capable, result, all_hvparams):
  """Verifies the instance list.

  @type what: C{dict}
  @param what: a dictionary of things to check
  @type vm_capable: boolean
  @param vm_capable: whether or not this node is vm capable
  @type result: dict
  @param result: dictionary of verification results; results of the
    verifications in this function will be added here
  @type all_hvparams: dict of dict of string
  @param all_hvparams: dictionary mapping hypervisor names to hvparams

  """
  if constants.NV_INSTANCELIST in what and vm_capable:
    # GetInstanceList can fail
    try:
      val = GetInstanceList(what[constants.NV_INSTANCELIST],
                            all_hvparams=all_hvparams)
    except RPCFail, err:
      # Store the error string so the master can report it
      val = str(err)
    result[constants.NV_INSTANCELIST] = val
908
909
def _VerifyNodeInfo(what, vm_capable, result, all_hvparams):
  """Verifies the node info.

  @type what: C{dict}
  @param what: a dictionary of things to check
  @type vm_capable: boolean
  @param vm_capable: whether or not this node is vm capable
  @type result: dict
  @param result: dictionary of verification results; results of the
    verifications in this function will be added here
  @type all_hvparams: dict of dict of string
  @param all_hvparams: dictionary mapping hypervisor names to hvparams

  """
  if not (constants.NV_HVINFO in what and vm_capable):
    return
  hvname = what[constants.NV_HVINFO]
  hvparams = all_hvparams[hvname]
  hyper = hypervisor.GetHypervisor(hvname)
  result[constants.NV_HVINFO] = hyper.GetNodeInfo(hvparams=hvparams)
929
930
def _VerifyClientCertificate(cert_file=pathutils.NODED_CLIENT_CERT_FILE):
  """Verify the existence and validity of the client SSL certificate.

  @type cert_file: string
  @param cert_file: path of the client certificate to check
  @rtype: tuple
  @return: C{(error code, message)} on failure, C{(None, digest)} on success

  """
  if not os.path.exists(cert_file):
    create_cert_cmd = "gnt-cluster renew-crypto --new-node-certificates"
    return (constants.CV_ERROR,
            "The client certificate does not exist. Run '%s' to create"
            " client certificates for all nodes." % create_cert_cmd)

  (errcode, msg) = utils.VerifyCertificate(cert_file)
  if errcode is not None:
    return (errcode, msg)

  # if everything is fine, we return the digest to be compared to the config
  return (None, utils.GetCertificateDigest(cert_filename=cert_file))
947
948
949 def VerifyNode(what, cluster_name, all_hvparams, node_groups, groups_cfg):
950 """Verify the status of the local node.
951
952 Based on the input L{what} parameter, various checks are done on the
953 local node.
954
955 If the I{filelist} key is present, this list of
956 files is checksummed and the file/checksum pairs are returned.
957
958 If the I{nodelist} key is present, we check that we have
959 connectivity via ssh with the target nodes (and check the hostname
960 report).
961
962 If the I{node-net-test} key is present, we check that we have
963 connectivity to the given nodes via both primary IP and, if
964 applicable, secondary IPs.
965
966 @type what: C{dict}
967 @param what: a dictionary of things to check:
968 - filelist: list of files for which to compute checksums
969 - nodelist: list of nodes we should check ssh communication with
970 - node-net-test: list of nodes we should check node daemon port
971 connectivity with
972 - hypervisor: list with hypervisors to run the verify for
973 @type cluster_name: string
974 @param cluster_name: the cluster's name
975 @type all_hvparams: dict of dict of strings
976 @param all_hvparams: a dictionary mapping hypervisor names to hvparams
977 @type node_groups: a dict of strings
978 @param node_groups: node _names_ mapped to their group uuids (it's enough to
979 have only those nodes that are in `what["nodelist"]`)
980 @type groups_cfg: a dict of dict of strings
981 @param groups_cfg: a dictionary mapping group uuids to their configuration
982 @rtype: dict
983 @return: a dictionary with the same keys as the input dict, and
984 values representing the result of the checks
985
986 """
987 result = {}
988 my_name = netutils.Hostname.GetSysName()
989 port = netutils.GetDaemonPort(constants.NODED)
990 vm_capable = my_name not in what.get(constants.NV_NONVMNODES, [])
991
992 _VerifyHypervisors(what, vm_capable, result, all_hvparams)
993 _VerifyHvparams(what, vm_capable, result)
994
995 if constants.NV_FILELIST in what:
996 fingerprints = utils.FingerprintFiles(map(vcluster.LocalizeVirtualPath,
997 what[constants.NV_FILELIST]))
998 result[constants.NV_FILELIST] = \
999 dict((vcluster.MakeVirtualPath(key), value)
1000 for (key, value) in fingerprints.items())
1001
1002 if constants.NV_CLIENT_CERT in what:
1003 result[constants.NV_CLIENT_CERT] = _VerifyClientCertificate()
1004
1005 if constants.NV_NODELIST in what:
1006 (nodes, bynode) = what[constants.NV_NODELIST]
1007
1008 # Add nodes from other groups (different for each node)
1009 try:
1010 nodes.extend(bynode[my_name])
1011 except KeyError:
1012 pass
1013
1014 # Use a random order
1015 random.shuffle(nodes)
1016
1017 # Try to contact all nodes
1018 val = {}
1019 for node in nodes:
1020 params = groups_cfg.get(node_groups.get(node))
1021 ssh_port = params["ndparams"].get(constants.ND_SSH_PORT)
1022 logging.debug("Ssh port %s (None = default) for node %s",
1023 str(ssh_port), node)
1024 success, message = _GetSshRunner(cluster_name). \
1025 VerifyNodeHostname(node, ssh_port)
1026 if not success:
1027 val[node] = message
1028
1029 result[constants.NV_NODELIST] = val
1030
1031 if constants.NV_NODENETTEST in what:
1032 result[constants.NV_NODENETTEST] = tmp = {}
1033 my_pip = my_sip = None
1034 for name, pip, sip in what[constants.NV_NODENETTEST]:
1035 if name == my_name:
1036 my_pip = pip
1037 my_sip = sip
1038 break
1039 if not my_pip:
1040 tmp[my_name] = ("Can't find my own primary/secondary IP"
1041 " in the node list")
1042 else:
1043 for name, pip, sip in what[constants.NV_NODENETTEST]:
1044 fail = []
1045 if not netutils.TcpPing(pip, port, source=my_pip):
1046 fail.append("primary")
1047 if sip != pip:
1048 if not netutils.TcpPing(sip, port, source=my_sip):
1049 fail.append("secondary")
1050 if fail:
1051 tmp[name] = ("failure using the %s interface(s)" %
1052 " and ".join(fail))
1053
1054 if constants.NV_MASTERIP in what:
1055 # FIXME: add checks on incoming data structures (here and in the
1056 # rest of the function)
1057 master_name, master_ip = what[constants.NV_MASTERIP]
1058 if master_name == my_name:
1059 source = constants.IP4_ADDRESS_LOCALHOST
1060 else:
1061 source = None
1062 result[constants.NV_MASTERIP] = netutils.TcpPing(master_ip, port,
1063 source=source)
1064
1065 if constants.NV_USERSCRIPTS in what:
1066 result[constants.NV_USERSCRIPTS] = \
1067 [script for script in what[constants.NV_USERSCRIPTS]
1068 if not utils.IsExecutable(script)]
1069
1070 if constants.NV_OOB_PATHS in what:
1071 result[constants.NV_OOB_PATHS] = tmp = []
1072 for path in what[constants.NV_OOB_PATHS]:
1073 try:
1074 st = os.stat(path)
1075 except OSError, err:
1076 tmp.append("error stating out of band helper: %s" % err)
1077 else:
1078 if stat.S_ISREG(st.st_mode):
1079 if stat.S_IMODE(st.st_mode) & stat.S_IXUSR:
1080 tmp.append(None)
1081 else:
1082 tmp.append("out of band helper %s is not executable" % path)
1083 else:
1084 tmp.append("out of band helper %s is not a file" % path)
1085
1086 if constants.NV_LVLIST in what and vm_capable:
1087 try:
1088 val = GetVolumeList([what[constants.NV_LVLIST]])
1089 except RPCFail, err:
1090 val = str(err)
1091 result[constants.NV_LVLIST] = val
1092
1093 _VerifyInstanceList(what, vm_capable, result, all_hvparams)
1094
1095 if constants.NV_VGLIST in what and vm_capable:
1096 result[constants.NV_VGLIST] = utils.ListVolumeGroups()
1097
1098 if constants.NV_PVLIST in what and vm_capable:
1099 check_exclusive_pvs = constants.NV_EXCLUSIVEPVS in what
1100 val = bdev.LogicalVolume.GetPVInfo(what[constants.NV_PVLIST],
1101 filter_allocatable=False,
1102 include_lvs=check_exclusive_pvs)
1103 if check_exclusive_pvs:
1104 result[constants.NV_EXCLUSIVEPVS] = _CheckExclusivePvs(val)
1105 for pvi in val:
1106 # Avoid sending useless data on the wire
1107 pvi.lv_list = []
1108 result[constants.NV_PVLIST] = map(objects.LvmPvInfo.ToDict, val)
1109
1110 if constants.NV_VERSION in what:
1111 result[constants.NV_VERSION] = (constants.PROTOCOL_VERSION,
1112 constants.RELEASE_VERSION)
1113
1114 _VerifyNodeInfo(what, vm_capable, result, all_hvparams)
1115
1116 if constants.NV_DRBDVERSION in what and vm_capable:
1117 try:
1118 drbd_version = DRBD8.GetProcInfo().GetVersionString()
1119 except errors.BlockDeviceError, err:
1120 logging.warning("Can't get DRBD version", exc_info=True)
1121 drbd_version = str(err)
1122 result[constants.NV_DRBDVERSION] = drbd_version
1123
1124 if constants.NV_DRBDLIST in what and vm_capable:
1125 try:
1126 used_minors = drbd.DRBD8.GetUsedDevs()
1127 except errors.BlockDeviceError, err:
1128 logging.warning("Can't get used minors list", exc_info=True)
1129 used_minors = str(err)
1130 result[constants.NV_DRBDLIST] = used_minors
1131
1132 if constants.NV_DRBDHELPER in what and vm_capable:
1133 status = True
1134 try:
1135 payload = drbd.DRBD8.GetUsermodeHelper()
1136 except errors.BlockDeviceError, err:
1137 logging.error("Can't get DRBD usermode helper: %s", str(err))
1138 status = False
1139 payload = str(err)
1140 result[constants.NV_DRBDHELPER] = (status, payload)
1141
1142 if constants.NV_NODESETUP in what:
1143 result[constants.NV_NODESETUP] = tmpr = []
1144 if not os.path.isdir("/sys/block") or not os.path.isdir("/sys/class/net"):
1145 tmpr.append("The sysfs filesytem doesn't seem to be mounted"
1146 " under /sys, missing required directories /sys/block"
1147 " and /sys/class/net")
1148 if (not os.path.isdir("/proc/sys") or
1149 not os.path.isfile("/proc/sysrq-trigger")):
1150 tmpr.append("The procfs filesystem doesn't seem to be mounted"
1151 " under /proc, missing required directory /proc/sys and"
1152 " the file /proc/sysrq-trigger")
1153
1154 if constants.NV_TIME in what:
1155 result[constants.NV_TIME] = utils.SplitTime(time.time())
1156
1157 if constants.NV_OSLIST in what and vm_capable:
1158 result[constants.NV_OSLIST] = DiagnoseOS()
1159
1160 if constants.NV_BRIDGES in what and vm_capable:
1161 result[constants.NV_BRIDGES] = [bridge
1162 for bridge in what[constants.NV_BRIDGES]
1163 if not utils.BridgeExists(bridge)]
1164
1165 if what.get(constants.NV_ACCEPTED_STORAGE_PATHS) == my_name:
1166 result[constants.NV_ACCEPTED_STORAGE_PATHS] = \
1167 filestorage.ComputeWrongFileStoragePaths()
1168
1169 if what.get(constants.NV_FILE_STORAGE_PATH):
1170 pathresult = filestorage.CheckFileStoragePath(
1171 what[constants.NV_FILE_STORAGE_PATH])
1172 if pathresult:
1173 result[constants.NV_FILE_STORAGE_PATH] = pathresult
1174
1175 if what.get(constants.NV_SHARED_FILE_STORAGE_PATH):
1176 pathresult = filestorage.CheckFileStoragePath(
1177 what[constants.NV_SHARED_FILE_STORAGE_PATH])
1178 if pathresult:
1179 result[constants.NV_SHARED_FILE_STORAGE_PATH] = pathresult
1180
1181 return result
1182
1183
def GetCryptoTokens(token_requests):
  """Perform actions on the node's cryptographic tokens.

  Token types can be 'ssl' or 'ssh'. So far only some actions are implemented
  for 'ssl'. Action 'get' returns the digest of the public client ssl
  certificate. Action 'create' creates a new client certificate and private key
  and also returns the digest of the certificate. The third parameter of a
  token request are optional parameters for the actions, so far only the
  filename is supported.

  @type token_requests: list of tuples of (string, string, dict), where the
    first string is in constants.CRYPTO_TYPES, the second in
    constants.CRYPTO_ACTIONS. The third parameter is a dictionary of string
    to string.
  @param token_requests: list of requests of cryptographic tokens and actions
    to perform on them. The actions come with a dictionary of options.
  @rtype: list of tuples (string, string)
  @return: list of tuples of the token type and the public crypto token

  """
  getents = runtime.GetEnts()
  _VALID_CERT_FILES = [pathutils.NODED_CERT_FILE,
                       pathutils.NODED_CLIENT_CERT_FILE,
                       pathutils.NODED_CLIENT_CERT_FILE_TMP]
  _DEFAULT_CERT_FILE = pathutils.NODED_CLIENT_CERT_FILE
  tokens = []
  for (token_type, action, options) in token_requests:
    if token_type not in constants.CRYPTO_TYPES:
      raise errors.ProgrammerError("Token type '%s' not supported." %
                                   token_type)
    if action not in constants.CRYPTO_ACTIONS:
      raise errors.ProgrammerError("Action '%s' is not supported." %
                                   action)
    if token_type == constants.CRYPTO_TYPE_SSL_DIGEST:
      if action == constants.CRYPTO_ACTION_CREATE:

        # extract file name from options
        cert_filename = None
        if options:
          cert_filename = options.get(constants.CRYPTO_OPTION_CERT_FILE)
        if not cert_filename:
          cert_filename = _DEFAULT_CERT_FILE
        # For security reasons, we don't allow arbitrary filenames
        if cert_filename not in _VALID_CERT_FILES:
          raise errors.ProgrammerError(
            "The certificate file name path '%s' is not allowed." %
            cert_filename)

        # extract serial number from options
        serial_no = None
        if options:
          try:
            serial_no = int(options[constants.CRYPTO_OPTION_SERIAL_NO])
          except ValueError:
            raise errors.ProgrammerError(
              "The given serial number is not an integer: %s." %
              options.get(constants.CRYPTO_OPTION_SERIAL_NO))
          except KeyError:
            raise errors.ProgrammerError("No serial number was provided.")

        if not serial_no:
          raise errors.ProgrammerError(
            "Cannot create an SSL certificate without a serial no.")

        utils.GenerateNewSslCert(
          True, cert_filename, serial_no,
          "Create new client SSL certificate in %s." % cert_filename,
          uid=getents.masterd_uid, gid=getents.masterd_gid)
        tokens.append((token_type,
                       utils.GetCertificateDigest(
                         cert_filename=cert_filename)))
      elif action == constants.CRYPTO_ACTION_GET:
        tokens.append((token_type,
                       utils.GetCertificateDigest()))
  return tokens
1259
1260
def EnsureDaemon(daemon_name, run):
  """Ensures the given daemon is running or stopped.

  @type daemon_name: string
  @param daemon_name: name of the daemon (e.g., constants.KVMD)

  @type run: bool
  @param run: whether to start or stop the daemon

  @rtype: bool
  @return: 'True' if daemon successfully started/stopped,
           'False' otherwise

  """
  # refuse to act on daemons this RPC is not supposed to manage
  if daemon_name not in [constants.KVMD]:
    return False

  if run:
    return utils.EnsureDaemon(daemon_name)
  else:
    return utils.StopDaemon(daemon_name)
1285
1286
1287 def GetBlockDevSizes(devices):
1288 """Return the size of the given block devices
1289
1290 @type devices: list
1291 @param devices: list of block device nodes to query
1292 @rtype: dict
1293 @return:
1294 dictionary of all block devices under /dev (key). The value is their
1295 size in MiB.
1296
1297 {'/dev/disk/by-uuid/123456-12321231-312312-312': 124}
1298
1299 """
1300 DEV_PREFIX = "/dev/"
1301 blockdevs = {}
1302
1303 for devpath in devices:
1304 if not utils.IsBelowDir(DEV_PREFIX, devpath):
1305 continue
1306
1307 try:
1308 st = os.stat(devpath)
1309 except EnvironmentError, err:
1310 logging.warning("Error stat()'ing device %s: %s", devpath, str(err))
1311 continue
1312
1313 if stat.S_ISBLK(st.st_mode):
1314 result = utils.RunCmd(["blockdev", "--getsize64", devpath])
1315 if result.failed:
1316 # We don't want to fail, just do not list this device as available
1317 logging.warning("Cannot get size for block device %s", devpath)
1318 continue
1319
1320 size = int(result.stdout) / (1024 * 1024)
1321 blockdevs[devpath] = size
1322 return blockdevs
1323
1324
def GetVolumeList(vg_names):
  """Compute list of logical volumes and their size.

  @type vg_names: list
  @param vg_names: the volume groups whose LVs we should list, or
      empty for all volume groups
  @rtype: dict
  @return:
      dictionary of all partions (key) with value being a tuple of
      their size (in MiB), inactive and online status::

        {'xenvg/test1': ('20.06', True, True)}

      in case of errors, a string is returned with the error
      details.

  """
  sep = "|"
  cmd = ["lvs", "--noheadings", "--units=m", "--nosuffix",
         "--separator=%s" % sep,
         "-ovg_name,lv_name,lv_size,lv_attr"]
  result = utils.RunCmd(cmd + list(vg_names or []))
  if result.failed:
    _Fail("Failed to list logical volumes, lvs output: %s", result.output)

  lvs = {}
  for raw_line in result.stdout.splitlines():
    line = raw_line.strip()
    match = _LVSLINE_REGEX.match(line)
    if match is None:
      logging.error("Invalid line returned from lvs output: '%s'", line)
      continue
    vg_name, lv_name, size, attr = match.groups()
    if attr[0] == "v":
      # we don't want to report such volumes as existing, since they
      # don't really hold data
      continue
    inactive = attr[4] == "-"
    online = attr[5] == "o"
    lvs["%s/%s" % (vg_name, lv_name)] = (size, inactive, online)

  return lvs
1369
1370
def ListVolumeGroups():
  """List the volume groups and their size.

  @rtype: dict
  @return: dictionary mapping volume group names to their size

  """
  # thin wrapper so the RPC layer has a stable, documented entry point
  return utils.ListVolumeGroups()
1380
1381
def NodeVolumes():
  """List all volumes on this node.

  @rtype: list
  @return:
    A list of dictionaries, each having four keys:
      - name: the logical volume name,
      - size: the size of the logical volume
      - dev: the physical device on which the LV lives
      - vg: the volume group to which it belongs

    In case of errors, we return an empty list and log the
    error.

    Note that since a logical volume can live on multiple physical
    volumes, the resulting list might include a logical volume
    multiple times.

  """
  result = utils.RunCmd(["lvs", "--noheadings", "--units=m", "--nosuffix",
                         "--separator=|",
                         "--options=lv_name,lv_size,devices,vg_name"])
  if result.failed:
    _Fail("Failed to list logical volumes, lvs output: %s",
          result.output)

  volumes = []
  for line in result.stdout.splitlines():
    if line.count("|") < 3:
      logging.warning("Strange line in the output from lvs: '%s'", line)
      continue
    fields = [field.strip() for field in line.split("|")]
    # the "devices" field lists entries such as "/dev/sda1(0)"; the part
    # before the parenthesis is the physical device name
    for dev in fields[2].split(","):
      volumes.append({"name": fields[0], "size": fields[1],
                      "dev": dev.split("(")[0], "vg": fields[3]})
  return volumes
1426
1427
def BridgesExist(bridges_list):
  """Check if a list of bridges exist on the current node.

  @type bridges_list: list
  @param bridges_list: names of the bridges to check for
  @rtype: None
  @raise RPCFail: if one or more of the bridges is missing; the message
    lists all missing bridges

  """
  # NOTE: the docstring previously claimed a boolean return value, but
  # this function signals missing bridges through _Fail (RPCFail) and
  # returns nothing on success.
  missing = [bridge for bridge in bridges_list
             if not utils.BridgeExists(bridge)]

  if missing:
    _Fail("Missing bridges %s", utils.CommaJoin(missing))
1442
1443
1444 def GetInstanceListForHypervisor(hname, hvparams=None,
1445 get_hv_fn=hypervisor.GetHypervisor):
1446 """Provides a list of instances of the given hypervisor.
1447
1448 @type hname: string
1449 @param hname: name of the hypervisor
1450 @type hvparams: dict of strings
1451 @param hvparams: hypervisor parameters for the given hypervisor
1452 @type get_hv_fn: function
1453 @param get_hv_fn: function that returns a hypervisor for the given hypervisor
1454 name; optional parameter to increase testability
1455
1456 @rtype: list
1457 @return: a list of all running instances on the current node
1458 - instance1.example.com
1459 - instance2.example.com
1460
1461 """
1462 try:
1463 return get_hv_fn(hname).ListInstances(hvparams=hvparams)
1464 except errors.HypervisorError, err:
1465 _Fail("Error enumerating instances (hypervisor %s): %s",
1466 hname, err, exc=True)
1467
1468
def GetInstanceList(hypervisor_list, all_hvparams=None,
                    get_hv_fn=hypervisor.GetHypervisor):
  """Provides a list of instances.

  @type hypervisor_list: list
  @param hypervisor_list: the list of hypervisors to query information
  @type all_hvparams: dict of dict of strings
  @param all_hvparams: a dictionary mapping hypervisor types to respective
    cluster-wide hypervisor parameters; may be C{None}, in which case no
    specific parameters are passed to the hypervisors
  @type get_hv_fn: function
  @param get_hv_fn: function that returns a hypervisor for the given hypervisor
    name; optional parameter to increase testability

  @rtype: list
  @return: a list of all running instances on the current node
    - instance1.example.com
    - instance2.example.com

  """
  results = []
  for hname in hypervisor_list:
    # all_hvparams defaults to None; honor that instead of crashing with
    # a TypeError on the subscript below
    if all_hvparams is None:
      hvparams = None
    else:
      hvparams = all_hvparams[hname]
    results.extend(GetInstanceListForHypervisor(hname, hvparams=hvparams,
                                                get_hv_fn=get_hv_fn))
  return results
1494
1495
def GetInstanceInfo(instance, hname, hvparams=None):
  """Gives back the information about an instance as a dictionary.

  @type instance: string
  @param instance: the instance name
  @type hname: string
  @param hname: the hypervisor type of the instance
  @type hvparams: dict of strings
  @param hvparams: the instance's hvparams

  @rtype: dict
  @return: dictionary with the following keys:
      - memory: memory size of instance (int)
      - state: state of instance (HvInstanceState)
      - time: cpu time of instance (float)
      - vcpus: the number of vcpus (int)

  """
  iinfo = hypervisor.GetHypervisor(hname).GetInstanceInfo(instance,
                                                          hvparams=hvparams)
  if iinfo is None:
    # unknown instance: nothing to report
    return {}

  return {
    "memory": iinfo[2],
    "vcpus": iinfo[3],
    "state": iinfo[4],
    "time": iinfo[5],
    }
1525
1526
def GetInstanceMigratable(instance):
  """Computes whether an instance can be migrated.

  @type instance: L{objects.Instance}
  @param instance: object representing the instance to be checked.
  @rtype: None
  @raise RPCFail: if the instance is not running on this node; missing
    disk symlinks are only logged as warnings

  """
  # NOTE: the docstring used to claim a (result, description) tuple
  # return, but this function returns nothing on success and uses
  # _Fail (RPCFail) to report problems.
  hyper = hypervisor.GetHypervisor(instance.hypervisor)
  iname = instance.name
  if iname not in hyper.ListInstances(hvparams=instance.hvparams):
    _Fail("Instance %s is not running", iname)

  for idx, _ in enumerate(instance.disks):
    link_name = _GetBlockDevSymlinkPath(iname, idx)
    if not os.path.islink(link_name):
      logging.warning("Instance %s is missing symlink %s for disk %d",
                      iname, link_name, idx)
1549
1550
def GetAllInstancesInfo(hypervisor_list, all_hvparams):
  """Gather data about all instances.

  This is the equivalent of L{GetInstanceInfo}, except that it
  computes data for all instances at once, thus being faster if one
  needs data about more than one instance.

  @type hypervisor_list: list
  @param hypervisor_list: list of hypervisors to query for instance data
  @type all_hvparams: dict of dict of strings
  @param all_hvparams: mapping of hypervisor names to hvparams

  @rtype: dict
  @return: dictionary of instance: data, with data having the following keys:
      - memory: memory size of instance (int)
      - state: xen state of instance (string)
      - time: cpu time of instance (float)
      - vcpus: the number of vcpus

  """
  output = {}
  for hname in hypervisor_list:
    iinfo = hypervisor.GetHypervisor(hname).GetAllInstancesInfo(
        all_hvparams[hname])
    if not iinfo:
      continue
    for (name, _, memory, vcpus, state, times) in iinfo:
      data = {
        "memory": memory,
        "vcpus": vcpus,
        "state": state,
        "time": times,
        }
      previous = output.get(name)
      if previous is not None:
        # the static parameters (memory, vcpus) must agree between the
        # hypervisors; state and time may legitimately differ between
        # the individual queries
        if previous["memory"] != memory or previous["vcpus"] != vcpus:
          _Fail("Instance %s is running twice"
                " with different parameters", name)
      output[name] = data

  return output
1594
1595
def GetInstanceConsoleInfo(instance_param_dict,
                           get_hv_fn=hypervisor.GetHypervisor):
  """Gather data about the console access of a set of instances of this node.

  This function assumes that the caller already knows which instances are on
  this node, by calling a function such as L{GetAllInstancesInfo} or
  L{GetInstanceList}.

  For every instance, a large amount of configuration data needs to be
  provided to the hypervisor interface in order to receive the console
  information. Whether this could or should be cut down can be discussed.
  The information is provided in a dictionary indexed by instance name,
  allowing any number of instance queries to be done.

  @type instance_param_dict: dict of string to tuple of dictionaries, where the
    dictionaries represent: L{objects.Instance}, L{objects.Node},
    L{objects.NodeGroup}, HvParams, BeParams
  @param instance_param_dict: mapping of instance name to parameters necessary
    for console information retrieval

  @rtype: dict
  @return: dictionary of instance: data, with data having the following keys:
      - instance: instance name
      - kind: console kind
      - message: used with kind == CONS_MESSAGE, indicates console to be
        unavailable, supplies error message
      - host: host to connect to
      - port: port to use
      - user: user for login
      - command: the command, broken into parts as an array
      - display: unknown, potentially unused?

  """
  output = {}
  for inst_name, params in instance_param_dict.items():
    # the serialized objects are turned back into the config classes ...
    instance = objects.Instance.FromDict(params["instance"])
    pnode = objects.Node.FromDict(params["node"])
    group = objects.NodeGroup.FromDict(params["group"])

    # ... and handed to the instance's hypervisor for the console data
    hyper = get_hv_fn(instance.hypervisor)
    console = hyper.GetInstanceConsole(instance, pnode, group,
                                       params["hvParams"],
                                       params["beParams"])
    output[inst_name] = console.ToDict()

  return output
1647
1648
def _InstanceLogName(kind, os_name, instance, component):
  """Compute the OS log filename for a given instance and operation.

  The instance name and os name are passed in as strings since not all
  operations have these as part of an instance object.

  @type kind: string
  @param kind: the operation type (e.g. add, import, etc.)
  @type os_name: string
  @param os_name: the os name
  @type instance: string
  @param instance: the name of the instance being imported/added/etc.
  @type component: string or None
  @param component: the name of the component of the instance being
    transferred

  """
  # TODO: Use tempfile.mkstemp to create unique filename
  if component:
    assert "/" not in component
    component_part = "-%s" % component
  else:
    component_part = ""
  basename = ("%s-%s-%s%s-%s.log" %
              (kind, os_name, instance, component_part,
               utils.TimestampForFilename()))
  return utils.PathJoin(pathutils.LOG_OS_DIR, basename)
1675
1676
def InstanceOsAdd(instance, reinstall, debug):
  """Add an OS to an instance.

  @type instance: L{objects.Instance}
  @param instance: Instance whose OS is to be installed
  @type reinstall: boolean
  @param reinstall: whether this is an instance reinstall
  @type debug: integer
  @param debug: debug level, passed to the OS scripts
  @rtype: None

  """
  inst_os = OSFromDisk(instance.os)

  create_env = OSEnvironment(instance, inst_os, debug)
  if reinstall:
    create_env["INSTANCE_REINSTALL"] = "1"

  logfile = _InstanceLogName("add", instance.os, instance.name, None)

  result = utils.RunCmd([inst_os.create_script], env=create_env,
                        cwd=inst_os.path, output=logfile, reset_env=True)
  if not result.failed:
    return

  logging.error("os create command '%s' returned error: %s, logfile: %s,"
                " output: %s", result.cmd, result.fail_reason, logfile,
                result.output)
  # show the caller the tail of the log instead of the full output
  tail = [utils.SafeEncode(line)
          for line in utils.TailFile(logfile, lines=20)]
  _Fail("OS create script failed (%s), last lines in the"
        " log file:\n%s", result.fail_reason, "\n".join(tail), log=False)
1707
1708
def RunRenameInstance(instance, old_name, debug):
  """Run the OS rename script for an instance.

  @type instance: L{objects.Instance}
  @param instance: Instance whose OS is to be installed
  @type old_name: string
  @param old_name: previous instance name
  @type debug: integer
  @param debug: debug level, passed to the OS scripts
  @rtype: boolean
  @return: the success of the operation

  """
  inst_os = OSFromDisk(instance.os)

  rename_env = OSEnvironment(instance, inst_os, debug)
  rename_env["OLD_INSTANCE_NAME"] = old_name

  logfile = _InstanceLogName("rename", instance.os,
                             "%s-%s" % (old_name, instance.name), None)

  result = utils.RunCmd([inst_os.rename_script], env=rename_env,
                        cwd=inst_os.path, output=logfile, reset_env=True)

  if result.failed:
    # this runs the rename script, so log it as such (the message used
    # to wrongly say "os create command")
    logging.error("os rename command '%s' returned error: %s output: %s",
                  result.cmd, result.fail_reason, result.output)
    lines = [utils.SafeEncode(val)
             for val in utils.TailFile(logfile, lines=20)]
    _Fail("OS rename script failed (%s), last lines in the"
          " log file:\n%s", result.fail_reason, "\n".join(lines), log=False)
1740
1741
def _GetBlockDevSymlinkPath(instance_name, idx, _dir=None):
  """Returns symlink path for block device.

  @type instance_name: string
  @param instance_name: name of the owning instance
  @type idx: int
  @param idx: the disk index
  @param _dir: base directory override, for testing only

  """
  base_dir = pathutils.DISK_LINKS_DIR if _dir is None else _dir
  link_basename = "%s%s%s" % (instance_name, constants.DISK_SEPARATOR, idx)
  return utils.PathJoin(base_dir, link_basename)
1752
1753
1754 def _SymlinkBlockDev(instance_name, device_path, idx):
1755 """Set up symlinks to a instance's block device.
1756
1757 This is an auxiliary function run when an instance is start (on the primary
1758 node) or when an instance is migrated (on the target node).
1759
1760
1761 @param instance_name: the name of the target instance
1762 @param device_path: path of the physical block device, on the node
1763 @param idx: the disk index
1764 @return: absolute path to the disk's symlink
1765
1766 """
1767 link_name = _GetBlockDevSymlinkPath(instance_name, idx)
1768 try:
1769 os.symlink(device_path, link_name)
1770 except OSError, err:
1771 if err.errno == errno.EEXIST:
1772 if (not os.path.islink(link_name) or
1773 os.readlink(link_name) != device_path):
1774 os.remove(link_name)
1775 os.symlink(device_path, link_name)
1776 else:
1777 raise
1778
1779 return link_name
1780
1781
def _RemoveBlockDevLinks(instance_name, disks):
  """Remove the block device symlinks belonging to the given instance.

  @param instance_name: the name of the owning instance
  @param disks: the instance's disks; only their count is used

  """
  for idx, _ in enumerate(disks):
    link_name = _GetBlockDevSymlinkPath(instance_name, idx)
    if not os.path.islink(link_name):
      continue
    try:
      os.remove(link_name)
    except OSError:
      # best-effort cleanup: log and carry on with the remaining links
      logging.exception("Can't remove symlink '%s'", link_name)
1793
1794
def _CalculateDeviceURI(instance, disk, device):
  """Get the URI for the device.

  @type instance: L{objects.Instance}
  @param instance: the instance which disk belongs to
  @type disk: L{objects.Disk}
  @param disk: the target disk object
  @type device: L{bdev.BlockDev}
  @param device: the corresponding BlockDevice
  @rtype: string
  @return: the device uri if any else None

  """
  access_mode = disk.params.get(constants.LDP_ACCESS,
                                constants.DISK_KERNELSPACE)
  if access_mode != constants.DISK_USERSPACE:
    return None
  # This can raise errors.BlockDeviceError
  return device.GetUserspaceAccessUri(instance.hypervisor)
1815
1816
1817 def _GatherAndLinkBlockDevs(instance):
1818 """Set up an instance's block device(s).
1819
1820 This is run on the primary node at instance startup. The block
1821 devices must be already assembled.
1822
1823 @type instance: L{objects.Instance}
1824 @param instance: the instance whose disks we should assemble
1825 @rtype: list
1826 @return: list of (disk_object, link_name, drive_uri)
1827
1828 """
1829 block_devices = []
1830 for idx, disk in enumerate(instance.disks):
1831 device = _RecursiveFindBD(disk)
1832 if device is None:
1833 raise errors.BlockDeviceError("Block device '%s' is not set up." %
1834 str(disk))
1835 device.Open()
1836 try:
1837 link_name = _SymlinkBlockDev(instance.name, device.dev_path, idx)
1838 except OSError, e:
1839 raise errors.BlockDeviceError("Cannot create block device symlink: %s" %
1840 e.strerror)
1841 uri = _CalculateDeviceURI(instance, disk, device)
1842
1843 block_devices.append((disk, link_name, uri))
1844
1845 return block_devices
1846
1847
def _IsInstanceUserDown(instance_info):
  """Check whether the hypervisor reports a user-initiated shutdown.

  @param instance_info: instance info dict as returned by
    L{GetInstanceInfo}, possibly empty or C{None}
  @return: truthy iff the info carries a "state" marking the instance
    as shut down by the user

  """
  # short-circuit chain: falsy/empty info is passed through unchanged
  return (instance_info and
          "state" in instance_info and
          hv_base.HvInstanceState.IsShutdown(instance_info["state"]))
1852
1853
def _GetInstanceInfo(instance):
  """Helper wrapper around L{GetInstanceInfo}.

  @type instance: L{objects.Instance}
  @param instance: the instance object
  @rtype: dict
  @return: see L{GetInstanceInfo}

  """
  return GetInstanceInfo(instance.name, instance.hypervisor,
                         hvparams=instance.hvparams)
1858
1859
1860 def StartInstance(instance, startup_paused, reason, store_reason=True):
1861 """Start an instance.
1862
1863 @type instance: L{objects.Instance}
1864 @param instance: the instance object
1865 @type startup_paused: bool
1866 @param instance: pause instance at startup?
1867 @type reason: list of reasons
1868 @param reason: the reason trail for this startup
1869 @type store_reason: boolean
1870 @param store_reason: whether to store the shutdown reason trail on file
1871 @rtype: None
1872
1873 """
1874 instance_info = _GetInstanceInfo(instance)
1875
1876 if instance_info and not _IsInstanceUserDown(instance_info):
1877 logging.info("Instance '%s' already running, not starting", instance.name)
1878 return
1879
1880 try:
1881 block_devices = _GatherAndLinkBlockDevs(instance)
1882 hyper = hypervisor.GetHypervisor(instance.hypervisor)
1883 hyper.StartInstance(instance, block_devices, startup_paused)
1884 if store_reason:
1885 _StoreInstReasonTrail(instance.name, reason)
1886 except errors.BlockDeviceError, err:
1887 _Fail("Block device error: %s", err, exc=True)
1888 except errors.HypervisorError, err:
1889 _RemoveBlockDevLinks(instance.name, instance.disks)
1890 _Fail("Hypervisor error: %s", err, exc=True)
1891
1892
def InstanceShutdown(instance, timeout, reason, store_reason=True):
  """Shut an instance down.

  @note: this functions uses polling with a hardcoded timeout.

  @type instance: L{objects.Instance}
  @param instance: the instance object
  @type timeout: integer
  @param timeout: maximum timeout for soft shutdown
  @type reason: list of reasons
  @param reason: the reason trail for this shutdown
  @type store_reason: boolean
  @param store_reason: whether to store the shutdown reason trail on file
  @rtype: None

  """
  hyper = hypervisor.GetHypervisor(instance.hypervisor)

  if not _GetInstanceInfo(instance):
    logging.info("Instance '%s' not running, doing nothing", instance.name)
    return

  class _TryShutdown(object):
    """Callable polled by L{utils.Retry} until the instance is gone.

    Returning normally ends the retry loop; raising
    L{utils.RetryAgain} asks for another attempt.
    """

    def __init__(self):
      # whether StopInstance has already been attempted once; passed as
      # "retry" on subsequent calls
      self.tried_once = False

    def __call__(self):
      # the instance disappeared: shutdown succeeded
      if not _GetInstanceInfo(instance):
        return

      try:
        hyper.StopInstance(instance, retry=self.tried_once, timeout=timeout)
        if store_reason:
          _StoreInstReasonTrail(instance.name, reason)
      except errors.HypervisorError, err:
        # if the instance is no longer existing, consider this a
        # success and go to cleanup
        if not _GetInstanceInfo(instance):
          return

        _Fail("Failed to stop instance '%s': %s", instance.name, err)

      self.tried_once = True

      raise utils.RetryAgain()

  try:
    # poll every 5 seconds until the soft-shutdown succeeds or the
    # caller-supplied timeout expires
    utils.Retry(_TryShutdown(), 5, timeout)
  except utils.RetryTimeout:
    # the shutdown did not succeed
    logging.error("Shutdown of '%s' unsuccessful, forcing", instance.name)

    try:
      hyper.StopInstance(instance, force=True)
    except errors.HypervisorError, err:
      # only raise an error if the instance still exists, otherwise
      # the error could simply be "instance ... unknown"!
      if _GetInstanceInfo(instance):
        _Fail("Failed to force stop instance '%s': %s", instance.name, err)

    # short grace period before re-checking whether the forced stop
    # actually removed the instance
    time.sleep(1)

    if _GetInstanceInfo(instance):
      _Fail("Could not shutdown instance '%s' even by destroy", instance.name)

  # best-effort post-shutdown cleanup; a failure here is only logged
  try:
    hyper.CleanupInstance(instance.name)
  except errors.HypervisorError, err:
    logging.warning("Failed to execute post-shutdown cleanup step: %s", err)

  _RemoveBlockDevLinks(instance.name, instance.disks)
1964
1965
def InstanceReboot(instance, reboot_type, shutdown_timeout, reason):
  """Reboot an instance.

  @type instance: L{objects.Instance}
  @param instance: the instance object to reboot
  @type reboot_type: str
  @param reboot_type: the type of reboot, one the following
    constants:
      - L{constants.INSTANCE_REBOOT_SOFT}: only reboot the
        instance OS, do not recreate the VM
      - L{constants.INSTANCE_REBOOT_HARD}: tear down and
        restart the VM (at the hypervisor level)
      - the other reboot type (L{constants.INSTANCE_REBOOT_FULL}) is
        not accepted here, since that mode is handled differently, in
        cmdlib, and translates into full stop and start of the
        instance (instead of a call_instance_reboot RPC)
  @type shutdown_timeout: integer
  @param shutdown_timeout: maximum timeout for soft shutdown
  @type reason: list of reasons
  @param reason: the reason trail for this reboot
  @rtype: None

  """
  # TODO: this is inconsistent with 'StartInstance' and 'InstanceShutdown'
  # because those functions simply 'return' on error whereas this one
  # raises an exception with '_Fail'
  if not _GetInstanceInfo(instance):
    _Fail("Cannot reboot instance '%s' that is not running", instance.name)

  hyper = hypervisor.GetHypervisor(instance.hypervisor)
  if reboot_type == constants.INSTANCE_REBOOT_SOFT:
    try:
      hyper.RebootInstance(instance)
    except errors.HypervisorError, err:
      _Fail("Failed to soft reboot instance '%s': %s", instance.name, err)
  elif reboot_type == constants.INSTANCE_REBOOT_HARD:
    try:
      # The reason trail is stored only once, here, instead of by each
      # of the two sub-operations (hence store_reason=False for both)
      InstanceShutdown(instance, shutdown_timeout, reason, store_reason=False)
      # NOTE: StartInstance returns None (see its @rtype), so 'result'
      # is None; kept for symmetry with the RPC convention
      result = StartInstance(instance, False, reason, store_reason=False)
      _StoreInstReasonTrail(instance.name, reason)
      return result
    except errors.HypervisorError, err:
      _Fail("Failed to hard reboot instance '%s': %s", instance.name, err)
  else:
    _Fail("Invalid reboot_type received: '%s'", reboot_type)
2011
2012
2013 def InstanceBalloonMemory(instance, memory):
2014 """Resize an instance's memory.
2015
2016 @type instance: L{objects.Instance}
2017 @param instance: the instance object
2018 @type memory: int
2019 @param memory: new memory amount in MB
2020 @rtype: None
2021
2022 """
2023 hyper = hypervisor.GetHypervisor(instance.hypervisor)
2024 running = hyper.ListInstances(hvparams=instance.hvparams)
2025 if instance.name not in running:
2026 logging.info("Instance %s is not running, cannot balloon", instance.name)
2027 return
2028 try:
2029 hyper.BalloonInstanceMemory(instance, memory)
2030 except errors.HypervisorError, err:
2031 _Fail("Failed to balloon instance memory: %s", err, exc=True)
2032
2033
2034 def MigrationInfo(instance):
2035 """Gather information about an instance to be migrated.
2036
2037 @type instance: L{objects.Instance}
2038 @param instance: the instance definition
2039
2040 """
2041 hyper = hypervisor.GetHypervisor(instance.hypervisor)
2042 try:
2043 info = hyper.MigrationInfo(instance)
2044 except errors.HypervisorError, err:
2045 _Fail("Failed to fetch migration information: %s", err, exc=True)
2046 return info
2047
2048
2049 def AcceptInstance(instance, info, target):
2050 """Prepare the node to accept an instance.
2051
2052 @type instance: L{objects.Instance}
2053 @param instance: the instance definition
2054 @type info: string/data (opaque)
2055 @param info: migration information, from the source node
2056 @type target: string
2057 @param target: target host (usually ip), on this node
2058
2059 """
2060 # TODO: why is this required only for DTS_EXT_MIRROR?
2061 if instance.disk_template in constants.DTS_EXT_MIRROR:
2062 # Create the symlinks, as the disks are not active
2063 # in any way
2064 try:
2065 _GatherAndLinkBlockDevs(instance)
2066 except errors.BlockDeviceError, err:
2067 _Fail("Block device error: %s", err, exc=True)
2068
2069 hyper = hypervisor.GetHypervisor(instance.hypervisor)
2070 try:
2071 hyper.AcceptInstance(instance, info, target)
2072 except errors.HypervisorError, err:
2073 if instance.disk_template in constants.DTS_EXT_MIRROR:
2074 _RemoveBlockDevLinks(instance.name, instance.disks)
2075 _Fail("Failed to accept instance: %s", err, exc=True)
2076
2077
2078 def FinalizeMigrationDst(instance, info, success):
2079 """Finalize any preparation to accept an instance.
2080
2081 @type instance: L{objects.Instance}
2082 @param instance: the instance definition
2083 @type info: string/data (opaque)
2084 @param info: migration information, from the source node
2085 @type success: boolean
2086 @param success: whether the migration was a success or a failure
2087
2088 """
2089 hyper = hypervisor.GetHypervisor(instance.hypervisor)
2090 try:
2091 hyper.FinalizeMigrationDst(instance, info, success)
2092 except errors.HypervisorError, err:
2093 _Fail("Failed to finalize migration on the target node: %s", err, exc=True)
2094
2095
2096 def MigrateInstance(cluster_name, instance, target, live):
2097 """Migrates an instance to another node.
2098
2099 @type cluster_name: string
2100 @param cluster_name: name of the cluster
2101 @type instance: L{objects.Instance}
2102 @param instance: the instance definition
2103 @type target: string
2104 @param target: the target node name
2105 @type live: boolean
2106 @param live: whether the migration should be done live or not (the
2107 interpretation of this parameter is left to the hypervisor)
2108 @raise RPCFail: if migration fails for some reason
2109
2110 """
2111 hyper = hypervisor.GetHypervisor(instance.hypervisor)
2112
2113 try:
2114 hyper.MigrateInstance(cluster_name, instance, target, live)
2115 except errors.HypervisorError, err:
2116 _Fail("Failed to migrate instance: %s", err, exc=True)
2117
2118
2119 def FinalizeMigrationSource(instance, success, live):
2120 """Finalize the instance migration on the source node.
2121
2122 @type instance: L{objects.Instance}
2123 @param instance: the instance definition of the migrated instance
2124 @type success: bool
2125 @param success: whether the migration succeeded or not
2126 @type live: bool
2127 @param live: whether the user requested a live migration or not
2128 @raise RPCFail: If the execution fails for some reason
2129
2130 """
2131 hyper = hypervisor.GetHypervisor(instance.hypervisor)
2132
2133 try:
2134 hyper.FinalizeMigrationSource(instance, success, live)
2135 except Exception, err: # pylint: disable=W0703
2136 _Fail("Failed to finalize the migration on the source node: %s", err,
2137 exc=True)
2138
2139
2140 def GetMigrationStatus(instance):
2141 """Get the migration status
2142
2143 @type instance: L{objects.Instance}
2144 @param instance: the instance that is being migrated
2145 @rtype: L{objects.MigrationStatus}
2146 @return: the status of the current migration (one of
2147 L{constants.HV_MIGRATION_VALID_STATUSES}), plus any additional
2148 progress info that can be retrieved from the hypervisor
2149 @raise RPCFail: If the migration status cannot be retrieved
2150
2151 """
2152 hyper = hypervisor.GetHypervisor(instance.hypervisor)
2153 try:
2154 return hyper.GetMigrationStatus(instance)
2155 except Exception, err: # pylint: disable=W0703
2156 _Fail("Failed to get migration status: %s", err, exc=True)
2157
2158
2159 def HotplugDevice(instance, action, dev_type, device, extra, seq):
2160 """Hotplug a device
2161
2162 Hotplug is currently supported only for KVM Hypervisor.
2163 @type instance: L{objects.Instance}
2164 @param instance: the instance to which we hotplug a device
2165 @type action: string
2166 @param action: the hotplug action to perform
2167 @type dev_type: string
2168 @param dev_type: the device type to hotplug
2169 @type device: either L{objects.NIC} or L{objects.Disk}
2170 @param device: the device object to hotplug
2171 @type extra: tuple
2172 @param extra: extra info used for disk hotplug (disk link, drive uri)
2173 @type seq: int
2174 @param seq: the index of the device from master perspective
2175 @raise RPCFail: in case instance does not have KVM hypervisor
2176
2177 """
2178 hyper = hypervisor.GetHypervisor(instance.hypervisor)
2179 try:
2180 hyper.VerifyHotplugSupport(instance, action, dev_type)
2181 except errors.HotplugError, err:
2182 _Fail("Hotplug is not supported: %s", err)
2183
2184 if action == constants.HOTPLUG_ACTION_ADD:
2185 fn = hyper.HotAddDevice
2186 elif action == constants.HOTPLUG_ACTION_REMOVE:
2187 fn = hyper.HotDelDevice
2188 elif action == constants.HOTPLUG_ACTION_MODIFY:
2189 fn = hyper.HotModDevice
2190 else:
2191 assert action in constants.HOTPLUG_ALL_ACTIONS
2192
2193 return fn(instance, dev_type, device, extra, seq)
2194
2195
2196 def HotplugSupported(instance):
2197 """Checks if hotplug is generally supported.
2198
2199 """
2200 hyper = hypervisor.GetHypervisor(instance.hypervisor)
2201 try:
2202 hyper.HotplugSupported(instance)
2203 except errors.HotplugError, err:
2204 _Fail("Hotplug is not supported: %s", err)
2205
2206
def BlockdevCreate(disk, size, owner, on_primary, info, excl_stor):
  """Creates a block device for an instance.

  @type disk: L{objects.Disk}
  @param disk: the object describing the disk we should create
  @type size: int
  @param size: the size of the physical underlying device, in MiB
  @type owner: str
  @param owner: the name of the instance for which disk is created,
      used for device cache data
  @type on_primary: boolean
  @param on_primary: indicates if it is the primary node or not
  @type info: string
  @param info: string that will be sent to the physical device
      creation, used for example to set (LVM) tags on LVs
  @type excl_stor: boolean
  @param excl_stor: Whether exclusive_storage is active

  @return: the new unique_id of the device (this can sometime be
      computed only after creation), or None. On secondary nodes,
      it's not required to return anything.

  """
  # TODO: remove the obsolete "size" argument
  # pylint: disable=W0613
  clist = []
  if disk.children:
    # Assemble (and possibly open) all children first, so that the
    # device itself can be created on top of them
    for child in disk.children:
      try:
        crdev = _RecursiveAssembleBD(child, owner, on_primary)
      except errors.BlockDeviceError, err:
        _Fail("Can't assemble device %s: %s", child, err)
      if on_primary or disk.AssembleOnSecondary():
        # we need the children open in case the device itself has to
        # be assembled
        try:
          # pylint: disable=E1103
          crdev.Open()
        except errors.BlockDeviceError, err:
          _Fail("Can't make child '%s' read-write: %s", child, err)
      clist.append(crdev)

  try:
    device = bdev.Create(disk, clist, excl_stor)
  except errors.BlockDeviceError, err:
    _Fail("Can't create block device: %s", err)

  if on_primary or disk.AssembleOnSecondary():
    try:
      device.Assemble()
    except errors.BlockDeviceError, err:
      _Fail("Can't assemble device after creation, unusual event: %s", err)
    if on_primary or disk.OpenOnSecondary():
      try:
        device.Open(force=True)
      except errors.BlockDeviceError, err:
        _Fail("Can't make device r/w after creation, unusual event: %s", err)
    # Record the assembled device in the device cache
    DevCacheManager.UpdateCache(device.dev_path, owner,
                                on_primary, disk.iv_name)

  # Set the creation info (e.g. LVM tags) only after the device exists
  device.SetInfo(info)

  return device.unique_id
2270
2271
def _WipeDevice(path, offset, size):
  """This function actually wipes the device.

  Runs "dd" to overwrite the requested region with zeroes and fails via
  L{_Fail} if the command exits with an error.

  @param path: The path to the device to wipe
  @param offset: The offset in MiB in the file
  @param size: The size in MiB to write

  """
  # "dd" is run with a one-mebibyte block size, so the offset and size
  # (both given in MiB) map directly to the seek/count values; if the
  # block size ever changes, both must be converted accordingly.
  block_size = 1024 * 1024

  wipe_cmd = [constants.DD_CMD,
              "if=/dev/zero",
              "seek=%d" % offset,
              "bs=%s" % block_size,
              "oflag=direct",
              "of=%s" % path,
              "count=%d" % size]
  result = utils.RunCmd(wipe_cmd)

  if result.failed:
    _Fail("Wipe command '%s' exited with error: %s; output: %s", result.cmd,
          result.fail_reason, result.output)
2293
2294
def BlockdevWipe(disk, offset, size):
  """Wipes a block device.

  @type disk: L{objects.Disk}
  @param disk: the disk object we want to wipe
  @type offset: int
  @param offset: The offset in MiB in the file
  @type size: int
  @param size: The size in MiB to write

  """
  try:
    bd = _RecursiveFindBD(disk)
  except errors.BlockDeviceError:
    bd = None

  if not bd:
    _Fail("Cannot execute wipe for device %s: device not found", disk.iv_name)

  # Sanity-check offset and size against the detected device size
  if offset < 0:
    _Fail("Negative offset")
  if size < 0:
    _Fail("Negative size")
  if offset > bd.size:
    _Fail("Offset is bigger than device size")
  if (offset + size) > bd.size:
    _Fail("The provided offset and size to wipe is bigger than device size")

  _WipeDevice(bd.dev_path, offset, size)
2325
2326
def BlockdevPauseResumeSync(disks, pause):
  """Pause or resume the sync of the block device.

  @type disks: list of L{objects.Disk}
  @param disks: the disks object we want to pause/resume
  @type pause: bool
  @param pause: Whether to pause or resume
  @rtype: list of tuples
  @return: per-disk (success, error message or None) pairs

  """
  success = []
  for disk in disks:
    try:
      bd = _RecursiveFindBD(disk)
    except errors.BlockDeviceError:
      bd = None

    if bd is None:
      success.append((False, ("Cannot change sync for device %s:"
                              " device not found" % disk.iv_name)))
      continue

    result = bd.PauseResumeSync(pause)

    if result:
      success.append((result, None))
    else:
      msg = "Pause" if pause else "Resume"
      success.append((result, "%s for device %s failed" % (msg, disk.iv_name)))

  return success
2360
2361
def BlockdevRemove(disk):
  """Remove a block device.

  @note: This is intended to be called recursively.

  @type disk: L{objects.Disk}
  @param disk: the disk object we should remove
  @rtype: boolean
  @return: the success of the operation

  """
  msgs = []
  try:
    rdev = _RecursiveFindBD(disk)
  except errors.BlockDeviceError, err:
    # probably can't attach
    logging.info("Can't attach to device %s in remove", disk)
    rdev = None
  if rdev is not None:
    r_path = rdev.dev_path

    # Removal may fail transiently (e.g. the device being briefly
    # busy), so it is retried for a bounded amount of time
    def _TryRemove():
      try:
        rdev.Remove()
        return []
      except errors.BlockDeviceError, err:
        return [str(err)]

    msgs.extend(utils.SimpleRetry([], _TryRemove,
                                  constants.DISK_REMOVE_RETRY_INTERVAL,
                                  constants.DISK_REMOVE_RETRY_TIMEOUT))

    # Only drop the cache entry if the device was actually removed
    if not msgs:
      DevCacheManager.RemoveCache(r_path)

  # Recurse into the children, collecting (not raising) their errors so
  # that all children get a removal attempt
  if disk.children:
    for child in disk.children:
      try:
        BlockdevRemove(child)
      except RPCFail, err:
        msgs.append(str(err))

  if msgs:
    _Fail("; ".join(msgs))
2406
2407
def _RecursiveAssembleBD(disk, owner, as_primary):
  """Activate a block device for an instance.

  This is run on the primary and secondary nodes for an instance.

  @note: this function is called recursively.

  @type disk: L{objects.Disk}
  @param disk: the disk we try to assemble
  @type owner: str
  @param owner: the name of the instance which owns the disk
  @type as_primary: boolean
  @param as_primary: if we should make the block device
      read/write

  @return: the assembled device or None (in case no device
      was assembled)
  @raise errors.BlockDeviceError: in case there is an error
      during the activation of the children or the device
      itself

  """
  children = []
  if disk.children:
    # 'mcn' is the maximum number of children that may fail to
    # assemble (appear as None) before we give up: ChildrenNeeded()
    # returns -1 for "all children needed", otherwise the minimum count
    mcn = disk.ChildrenNeeded()
    if mcn == -1:
      mcn = 0 # max number of Nones allowed
    else:
      mcn = len(disk.children) - mcn # max number of Nones
    for chld_disk in disk.children:
      try:
        cdev = _RecursiveAssembleBD(chld_disk, owner, as_primary)
      except errors.BlockDeviceError, err:
        # Too many children already failed: propagate the error
        if children.count(None) >= mcn:
          raise
        cdev = None
        logging.error("Error in child activation (but continuing): %s",
                      str(err))
      children.append(cdev)

  if as_primary or disk.AssembleOnSecondary():
    r_dev = bdev.Assemble(disk, children)
    result = r_dev
    if as_primary or disk.OpenOnSecondary():
      r_dev.Open()
    DevCacheManager.UpdateCache(r_dev.dev_path, owner,
                                as_primary, disk.iv_name)

  else:
    # Nothing to assemble on this node; True signals success to the
    # caller without providing a device object
    result = True
  return result
2459
2460
def BlockdevAssemble(disk, instance, as_primary, idx):
  """Activate a block device for an instance.

  This is a wrapper over _RecursiveAssembleBD.

  @rtype: tuple
  @return: on primary nodes, the triple (C{/dev/...} path, symlink
      name, device URI or C{None}); on secondary nodes the pair
      (C{True}, C{True})

  """
  try:
    result = _RecursiveAssembleBD(disk, instance.name, as_primary)
    if isinstance(result, BlockDev):
      # pylint: disable=E1103
      dev_path = result.dev_path
      link_name = None
      uri = None
      if as_primary:
        link_name = _SymlinkBlockDev(instance.name, dev_path, idx)
        uri = _CalculateDeviceURI(instance, disk, result)
    elif result:
      # NOTE(review): secondary nodes return a 2-tuple here, while the
      # primary path below returns a 3-tuple; callers apparently only
      # fully unpack the primary result -- confirm before unifying
      return result, result
    else:
      _Fail("Unexpected result from _RecursiveAssembleBD")
  except errors.BlockDeviceError, err:
    _Fail("Error while assembling disk: %s", err, exc=True)
  except OSError, err:
    _Fail("Error while symlinking disk: %s", err, exc=True)

  return dev_path, link_name, uri
2491
2492
2493 def BlockdevShutdown(disk):
2494 """Shut down a block device.
2495
2496 First, if the device is assembled (Attach() is successful), then
2497 the device is shutdown. Then the children of the device are
2498 shutdown.
2499
2500 This function is called recursively. Note that we don't cache the
2501 children or such, as oppossed to assemble, shutdown of different
2502 devices doesn't require that the upper device was active.
2503
2504 @type disk: L{objects.Disk}
2505 @param disk: the description of the disk we should
2506 shutdown
2507 @rtype: None
2508
2509 """
2510 msgs = []
2511 r_dev = _RecursiveFindBD(disk)
2512 if r_dev is not None:
2513 r_path = r_dev.dev_path
2514 try:
2515 r_dev.Shutdown()
2516 DevCacheManager.RemoveCache(r_path)
2517 except errors.BlockDeviceError, err:
2518 msgs.append(str(err))
2519
2520 if disk.children:
2521 for child in disk.children:
2522 try:
2523 BlockdevShutdown(child)
2524 except RPCFail, err:
2525 msgs.append(str(err))
2526
2527 if msgs:
2528 _Fail("; ".join(msgs))
2529
2530
def BlockdevAddchildren(parent_cdev, new_cdevs):
  """Extend a mirrored block device.

  @type parent_cdev: L{objects.Disk}
  @param parent_cdev: the disk to which we should add children
  @type new_cdevs: list of L{objects.Disk}
  @param new_cdevs: the list of children which we should add
  @rtype: None

  """
  parent = _RecursiveFindBD(parent_cdev)
  if parent is None:
    _Fail("Can't find parent device '%s' in add children", parent_cdev)
  children = [_RecursiveFindBD(disk) for disk in new_cdevs]
  if None in children:
    _Fail("Can't find new device(s) to add: %s:%s", children, new_cdevs)
  parent.AddChildren(children)
2548
2549
def BlockdevRemovechildren(parent_cdev, new_cdevs):
  """Shrink a mirrored block device.

  @type parent_cdev: L{objects.Disk}
  @param parent_cdev: the disk from which we should remove children
  @type new_cdevs: list of L{objects.Disk}
  @param new_cdevs: the list of children which we should remove
  @rtype: None

  """
  parent = _RecursiveFindBD(parent_cdev)
  if parent is None:
    _Fail("Can't find parent device '%s' in remove children", parent_cdev)
  paths = []
  for disk in new_cdevs:
    static_path = disk.StaticDevPath()
    if static_path is None:
      # No static path known; resolve the child device to get its path
      child_bd = _RecursiveFindBD(disk)
      if child_bd is None:
        _Fail("Can't find device %s while removing children", disk)
      paths.append(child_bd.dev_path)
    else:
      if not utils.IsNormAbsPath(static_path):
        _Fail("Strange path returned from StaticDevPath: '%s'", static_path)
      paths.append(static_path)
  parent.RemoveChildren(paths)
2577
2578
def BlockdevGetmirrorstatus(disks):
  """Get the mirroring status of a list of devices.

  @type disks: list of L{objects.Disk}
  @param disks: the list of disks which we should query
  @rtype: disk
  @return: List of L{objects.BlockDevStatus}, one for each disk
  @raise errors.BlockDeviceError: if any of the disks cannot be
      found

  """
  stats = []
  for disk in disks:
    bd = _RecursiveFindBD(disk)
    if bd is None:
      _Fail("Can't find device %s", disk)
    stats.append(bd.CombinedSyncStatus())
  return stats
2599
2600
2601 def BlockdevGetmirrorstatusMulti(disks):
2602 """Get the mirroring status of a list of devices.
2603
2604 @type disks: list of L{objects.Disk}
2605 @param disks: the list of disks which we should query
2606 @rtype: disk
2607 @return: List of tuples, (bool, status), one for each disk; bool denotes
2608 success/failure, status is L{objects.BlockDevStatus} on success, string
2609 otherwise
2610
2611 """
2612 result = []
2613 for disk in disks:
2614 try:
2615 rbd = _RecursiveFindBD(disk)
2616 if rbd is None:
2617 result.append((False, "Can't find device %s" % disk))
2618 continue
2619
2620 status = rbd.CombinedSyncStatus()
2621 except errors.BlockDeviceError, err:
2622 logging.exception("Error while getting disk status")
2623 result.append((False, str(err)))
2624 else:
2625 result.append((True, status))
2626
2627 assert len(disks) == len(result)
2628
2629 return result
2630
2631
def _RecursiveFindBD(disk):
  """Check if a device is activated.

  If so, return information about the real device.

  @type disk: L{objects.Disk}
  @param disk: the disk object we need to find

  @return: None if the device can't be found,
      otherwise the device instance

  """
  # Resolve all children first (recursively), then look up the device
  # itself on top of them
  children = [_RecursiveFindBD(chdisk) for chdisk in (disk.children or [])]

  return bdev.FindDevice(disk, children)
2650
2651
def _OpenRealBD(disk):
  """Opens the underlying block device of a disk.

  @type disk: L{objects.Disk}
  @param disk: the disk object we want to open
  @return: the activated block device instance
  @raise RPCFail: if the device is not set up

  """
  bd = _RecursiveFindBD(disk)
  if bd is None:
    _Fail("Block device '%s' is not set up", disk)

  bd.Open()

  return bd
2666
2667
2668 def BlockdevFind(disk):
2669 """Check if a device is activated.
2670
2671 If it is, return information about the real device.
2672
2673 @type disk: L{objects.Disk}
2674 @param disk: the disk to find
2675 @rtype: None or objects.BlockDevStatus
2676 @return: None if the disk cannot be found, otherwise a the current
2677 information
2678
2679 """
2680 try:
2681 rbd = _RecursiveFindBD(disk)
2682 except errors.BlockDeviceError, err:
2683 _Fail("Failed to find device: %s", err, exc=True)
2684
2685 if rbd is None:
2686 return None
2687
2688 return rbd.GetSyncStatus()
2689
2690
def BlockdevGetdimensions(disks):
  """Computes the size of the given disks.

  If a disk is not found, returns None instead.

  @type disks: list of L{objects.Disk}
  @param disks: the list of disk to compute the size for
  @rtype: list
  @return: list with elements None if the disk cannot be found,
      otherwise the pair (size, spindles), where spindles is None if the
      device doesn't support that

  """
  dimensions = []
  for disk in disks:
    try:
      bd = _RecursiveFindBD(disk)
    except errors.BlockDeviceError:
      bd = None
    if bd is None:
      dimensions.append(None)
    else:
      dimensions.append(bd.GetActualDimensions())
  return dimensions
2716
2717
def UploadFile(file_name, data, mode, uid, gid, atime, mtime):
  """Write a file to the filesystem.

  This allows the master to overwrite(!) a file. It will only perform
  the operation if the file belongs to a list of configuration files.

  @type file_name: str
  @param file_name: the target file name
  @type data: str
  @param data: the new contents of the file
  @type mode: int
  @param mode: the mode to give the file (can be None)
  @type uid: string
  @param uid: the owner of the file
  @type gid: string
  @param gid: the group of the file
  @type atime: float
  @param atime: the atime to set on the file (can be None)
  @type mtime: float
  @param mtime: the mtime to set on the file (can be None)
  @rtype: None

  """
  file_name = vcluster.LocalizeVirtualPath(file_name)

  if not os.path.isabs(file_name):
    _Fail("Filename passed to UploadFile is not absolute: '%s'", file_name)

  if file_name not in _ALLOWED_UPLOAD_FILES:
    _Fail("Filename passed to UploadFile not in allowed upload targets: '%s'",
          file_name)

  contents = _Decompress(data)

  if not isinstance(uid, basestring) or not isinstance(gid, basestring):
    _Fail("Invalid username/groupname type")

  # Resolve the symbolic user/group names to numeric ids
  ents = runtime.GetEnts()
  uid = ents.LookupUser(uid)
  gid = ents.LookupGroup(gid)

  utils.SafeWriteFile(file_name, None,
                      data=contents, mode=mode, uid=uid, gid=gid,
                      atime=atime, mtime=mtime)
2762
2763
def RunOob(oob_program, command, node, timeout):
  """Executes oob_program with given command on given node.

  @param oob_program: The path to the executable oob_program
  @param command: The command to invoke on oob_program
  @param node: The node given as an argument to the program
  @param timeout: Timeout after which we kill the oob program

  @return: stdout
  @raise RPCFail: If execution fails for some reason

  """
  cmd_result = utils.RunCmd([oob_program, command, node], timeout=timeout)

  if cmd_result.failed:
    _Fail("'%s' failed with reason '%s'; output: %s", cmd_result.cmd,
          cmd_result.fail_reason, cmd_result.output)

  return cmd_result.stdout
2783
2784
2785 def _OSOndiskAPIVersion(os_dir):
2786 """Compute and return the API version of a given OS.
2787
2788 This function will try to read the API version of the OS residing in
2789 the 'os_dir' directory.
2790
2791 @type os_dir: str
2792 @param os_dir: the directory in which we should look for the OS
2793 @rtype: tuple
2794 @return: tuple (status, data) with status denoting the validity and
2795 data holding either the vaid versions or an error message
2796
2797 """
2798 api_file = utils.PathJoin(os_dir, constants.OS_API_FILE)
2799
2800 try:
2801 st = os.stat(api_file)
2802 except EnvironmentError, err:
2803 return False, ("Required file '%s' not found under path %s: %s" %
2804 (constants.OS_API_FILE, os_dir, utils.ErrnoOrStr(err)))
2805
2806 if not stat.S_ISREG(stat.S_IFMT(st.st_mode)):
2807 return False, ("File '%s' in %s is not a regular file" %
2808 (constants.OS_API_FILE, os_dir))
2809
2810 try:
2811 api_versions = utils.ReadFile(api_file).splitlines()
2812 except EnvironmentError, err:
2813 return False, ("Error while reading the API version file at %s: %s" %
2814 (api_file, utils.ErrnoOrStr(err)))
2815
2816 try:
2817 api_versions = [int(version.strip()) for version in api_versions]
2818 except (TypeError, ValueError), err:
2819 return False, ("API version(s) can't be converted to integer: %s" %
2820 str(err))
2821
2822 return True, api_versions
2823
2824
2825 def DiagnoseOS(top_dirs=None):
2826 """Compute the validity for all OSes.
2827
2828 @type top_dirs: list
2829 @param top_dirs: the list of directories in which to
2830 search (if not given defaults to
2831 L{pathutils.OS_SEARCH_PATH})
2832 @rtype: list of L{objects.OS}
2833 @return: a list of tuples (name, path, status, diagnose, variants,
2834 parameters, api_version) for all (potential) OSes under all
2835 search paths, where:
2836 - name is the (potential) OS name
2837 - path is the full path to the OS
2838 - status True/False is the validity of the OS
2839 - diagnose is the error message for an invalid OS, otherwise empty
2840 - variants is a list of supported OS variants, if any
2841 - parameters is a list of (name, help) parameters, if any
2842 - api_version is a list of support OS API versions
2843
2844 """
2845 if top_dirs is None:
2846 top_dirs = pathutils.OS_SEARCH_PATH
2847
2848 result = []
2849 for dir_name in top_dirs:
2850 if os.path.isdir(dir_name):
2851 try:
2852 f_names = utils.ListVisibleFiles(dir_name)
2853 except EnvironmentError, err:
2854 logging.exception("Can't list the OS directory %s: %s", dir_name, err)
2855 break
2856 for name in f_names:
2857 os_path = utils.PathJoin(dir_name, name)
2858 status, os_inst = _TryOSFromDisk(name, base_dir=dir_name)
2859 if status:
2860 diagnose = ""
2861 variants = os_inst.supported_variants
2862 parameters = os_inst.supported_parameters
2863 api_versions = os_inst.api_versions
2864 else:
2865 diagnose = os_inst
2866 variants = parameters = api_versions = []
2867 result.append((name, os_path, status, diagnose, variants,
2868 parameters, api_versions))
2869
2870 return result
2871
2872
def _TryOSFromDisk(name, base_dir=None):
  """Create an OS instance from disk.

  This function will return an OS instance if the given name is a
  valid OS name.

  @type name: string
  @param name: the (pure, variant-less) OS name to look up
  @type base_dir: string
  @keyword base_dir: Base directory containing OS installations.
                     Defaults to a search in all the OS_SEARCH_PATH dirs.
  @rtype: tuple
  @return: success and either the OS instance if we find a valid one,
      or error message

  """
  if base_dir is None:
    os_dir = utils.FindFile(name, pathutils.OS_SEARCH_PATH, os.path.isdir)
  else:
    os_dir = utils.FindFile(name, [base_dir], os.path.isdir)

  if os_dir is None:
    return False, "Directory for OS %s not found in search path" % name

  status, api_versions = _OSOndiskAPIVersion(os_dir)
  if not status:
    # push the error up
    return status, api_versions

  # the OS must advertise at least one API version that we support
  if not constants.OS_API_VERSIONS.intersection(api_versions):
    return False, ("API version mismatch for path '%s': found %s, want %s." %
                   (os_dir, api_versions, constants.OS_API_VERSIONS))

  # OS Files dictionary, we will populate it with the absolute path
  # names; if the value is True, then it is a required file, otherwise
  # an optional one
  os_files = dict.fromkeys(constants.OS_SCRIPTS, True)

  # the variants file is only looked for (and is optional) from API 15 on
  if max(api_versions) >= constants.OS_API_V15:
    os_files[constants.OS_VARIANTS_FILE] = False

  # the parameters file is required from API 20 on; for older APIs the
  # verify script is dropped from the required scripts
  if max(api_versions) >= constants.OS_API_V20:
    os_files[constants.OS_PARAMETERS_FILE] = True
  else:
    del os_files[constants.OS_SCRIPT_VERIFY]

  # note: .items() returns a list snapshot under Python 2, so deleting
  # optional entries from os_files inside the loop is safe here
  for (filename, required) in os_files.items():
    os_files[filename] = utils.PathJoin(os_dir, filename)

    try:
      st = os.stat(os_files[filename])
    except EnvironmentError, err:
      # a missing optional file is simply dropped; anything else is fatal
      if err.errno == errno.ENOENT and not required:
        del os_files[filename]
        continue
      return False, ("File '%s' under path '%s' is missing (%s)" %
                     (filename, os_dir, utils.ErrnoOrStr(err)))

    if not stat.S_ISREG(stat.S_IFMT(st.st_mode)):
      return False, ("File '%s' under path '%s' is not a regular file" %
                     (filename, os_dir))

    if filename in constants.OS_SCRIPTS:
      # scripts must carry the owner-executable bit
      if stat.S_IMODE(st.st_mode) & stat.S_IXUSR != stat.S_IXUSR:
        return False, ("File '%s' under path '%s' is not executable" %
                       (filename, os_dir))

  variants = []
  if constants.OS_VARIANTS_FILE in os_files:
    variants_file = os_files[constants.OS_VARIANTS_FILE]
    try:
      variants = \
        utils.FilterEmptyLinesAndComments(utils.ReadFile(variants_file))
    except EnvironmentError, err:
      # we accept missing files, but not other errors
      if err.errno != errno.ENOENT:
        return False, ("Error while reading the OS variants file at %s: %s" %
                       (variants_file, utils.ErrnoOrStr(err)))

  parameters = []
  if constants.OS_PARAMETERS_FILE in os_files:
    parameters_file = os_files[constants.OS_PARAMETERS_FILE]
    try:
      parameters = utils.ReadFile(parameters_file).splitlines()
    except EnvironmentError, err:
      return False, ("Error while reading the OS parameters file at %s: %s" %
                     (parameters_file, utils.ErrnoOrStr(err)))
    # each line becomes a [name, help] pair (help may be absent)
    parameters = [v.split(None, 1) for v in parameters]

  os_obj = objects.OS(name=name, path=os_dir,
                      create_script=os_files[constants.OS_SCRIPT_CREATE],
                      export_script=os_files[constants.OS_SCRIPT_EXPORT],
                      import_script=os_files[constants.OS_SCRIPT_IMPORT],
                      rename_script=os_files[constants.OS_SCRIPT_RENAME],
                      verify_script=os_files.get(constants.OS_SCRIPT_VERIFY,
                                                 None),
                      supported_variants=variants,
                      supported_parameters=parameters,
                      api_versions=api_versions)
  return True, os_obj
2971
2972
def OSFromDisk(name, base_dir=None):
  """Create an OS instance from disk.

  This function will return an OS instance if the given name is a
  valid OS name. Otherwise, it will raise an appropriate
  L{RPCFail} exception, detailing why this is not a valid OS.

  This is just a wrapper over L{_TryOSFromDisk}, which doesn't raise
  an exception but returns true/false status data.

  @type base_dir: string
  @keyword base_dir: Base directory containing OS installations.
                     Defaults to a search in all the OS_SEARCH_PATH dirs.
  @rtype: L{objects.OS}
  @return: the OS instance if we find a valid one
  @raise RPCFail: if we don't find a valid OS

  """
  # strip a possible "+variant" suffix before the on-disk lookup
  pure_name = objects.OS.GetName(name)

  found, os_data = _TryOSFromDisk(pure_name, base_dir)
  if not found:
    # os_data holds the diagnostic message in this case
    _Fail(os_data)

  return os_data
2998
2999
def OSCoreEnv(os_name, inst_os, os_params, debug=0):
  """Calculate the basic environment for an os script.

  @type os_name: str
  @param os_name: full operating system name (including variant)
  @type inst_os: L{objects.OS}
  @param inst_os: operating system for which the environment is being built
  @type os_params: dict
  @param os_params: the OS parameters
  @type debug: integer
  @param debug: debug level (0 or 1, for OS Api 10)
  @rtype: dict
  @return: dict of environment variables

  """
  # advertise the highest API version both sides understand
  api_version = \
    max(constants.OS_API_VERSIONS.intersection(inst_os.api_versions))

  env = {
    "OS_API_VERSION": "%d" % api_version,
    "OS_NAME": inst_os.name,
    "DEBUG_LEVEL": "%d" % debug,
    }

  # OS variant: if the full name carries no variant suffix, fall back
  # to the first supported one; empty for pre-v15 or variant-less OSes
  variant = ""
  if api_version >= constants.OS_API_V15 and inst_os.supported_variants:
    variant = objects.OS.GetVariant(os_name) or inst_os.supported_variants[0]
  env["OS_VARIANT"] = variant

  # OS parameters, exported as OSP_<NAME>
  for pname, pvalue in os_params.items():
    env["OSP_%s" % pname.upper()] = pvalue

  # Set a default path otherwise programs called by OS scripts (or
  # even hooks called from OS scripts) might break, and we don't want
  # to have each script require setting a PATH variable
  env["PATH"] = constants.HOOKS_PATH

  return env
3043
3044
def OSEnvironment(instance, inst_os, debug=0):
  """Calculate the environment for an os script.

  Extends the core environment (L{OSCoreEnv}) with per-instance data:
  identity attributes, hypervisor/backend parameters, and one group of
  variables per disk and per NIC.

  @type instance: L{objects.Instance}
  @param instance: target instance for the os script run
  @type inst_os: L{objects.OS}
  @param inst_os: operating system for which the environment is being built
  @type debug: integer
  @param debug: debug level (0 or 1, for OS Api 10)
  @rtype: dict
  @return: dict of environment variables
  @raise errors.BlockDeviceError: if the block device
      cannot be found

  """
  result = OSCoreEnv(instance.os, inst_os, instance.osparams, debug=debug)

  # basic instance identity, stringified for the environment
  for attr in ["name", "os", "uuid", "ctime", "mtime", "primary_node"]:
    result["INSTANCE_%s" % attr.upper()] = str(getattr(instance, attr))

  result["HYPERVISOR"] = instance.hypervisor
  result["DISK_COUNT"] = "%d" % len(instance.disks)
  result["NIC_COUNT"] = "%d" % len(instance.nics)
  result["INSTANCE_SECONDARY_NODES"] = \
    ("%s" % " ".join(instance.secondary_nodes))

  # Disks
  for idx, disk in enumerate(instance.disks):
    # resolve the real block device to obtain its access path
    real_disk = _OpenRealBD(disk)
    result["DISK_%d_PATH" % idx] = real_disk.dev_path
    result["DISK_%d_ACCESS" % idx] = disk.mode
    result["DISK_%d_UUID" % idx] = disk.uuid
    # name is optional, only exported when set
    if disk.name:
      result["DISK_%d_NAME" % idx] = disk.name
    if constants.HV_DISK_TYPE in instance.hvparams:
      result["DISK_%d_FRONTEND_TYPE" % idx] = \
        instance.hvparams[constants.HV_DISK_TYPE]
    if disk.dev_type in constants.DTS_BLOCK:
      result["DISK_%d_BACKEND_TYPE" % idx] = "block"
    elif disk.dev_type in constants.DTS_FILEBASED:
      result["DISK_%d_BACKEND_TYPE" % idx] = \
        "file:%s" % disk.logical_id[0]

  # NICs
  for idx, nic in enumerate(instance.nics):
    result["NIC_%d_MAC" % idx] = nic.mac
    result["NIC_%d_UUID" % idx] = nic.uuid
    # name and IP are optional, only exported when set
    if nic.name:
      result["NIC_%d_NAME" % idx] = nic.name
    if nic.ip:
      result["NIC_%d_IP" % idx] = nic.ip
    result["NIC_%d_MODE" % idx] = nic.nicparams[constants.NIC_MODE]
    if nic.nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
      result["NIC_%d_BRIDGE" % idx] = nic.nicparams[constants.NIC_LINK]
    if nic.nicparams[constants.NIC_LINK]:
      result["NIC_%d_LINK" % idx] = nic.nicparams[constants.NIC_LINK]
    if nic.netinfo:
      # merge the network's properties under the NIC_<idx>_ prefix
      nobj = objects.Network.FromDict(nic.netinfo)
      result.update(nobj.HooksDict("NIC_%d_" % idx))
    if constants.HV_NIC_TYPE in instance.hvparams:
      result["NIC_%d_FRONTEND_TYPE" % idx] = \
        instance.hvparams[constants.HV_NIC_TYPE]

  # HV/BE params
  for source, kind in [(instance.beparams, "BE"), (instance.hvparams, "HV")]:
    for key, value in source.items():
      result["INSTANCE_%s_%s" % (kind, key)] = str(value)

  return result
3114
3115
3116 def DiagnoseExtStorage(top_dirs=None):
3117 """Compute the validity for all ExtStorage Providers.
3118
3119 @type top_dirs: list
3120 @param top_dirs: the list of directories in which to
3121 search (if not given defaults to
3122 L{pathutils.ES_SEARCH_PATH})
3123 @rtype: list of L{objects.ExtStorage}
3124 @return: a list of tuples (name, path, status, diagnose, parameters)
3125 for all (potential) ExtStorage Providers under all
3126 search paths, where:
3127 - name is the (potential) ExtStorage Provider
3128 - path is the full path to the ExtStorage Provider
3129 - status True/False is the validity of the ExtStorage Provider
3130 - diagnose is the error message for an invalid ExtStorage Provider,
3131 otherwise empty
3132 - parameters is a list of (name, help) parameters, if any
3133
3134 """
3135 if top_dirs is None:
3136 top_dirs = pathutils.ES_SEARCH_PATH
3137
3138 result = []
3139 for dir_name in top_dirs:
3140 if os.path.isdir(dir_name):
3141 try:
3142 f_names = utils.ListVisibleFiles(dir_name)
3143 except EnvironmentError, err:
3144 logging.exception("Can't list the ExtStorage directory %s: %s",
3145 dir_name, err)
3146 break
3147 for name in f_names:
3148 es_path = utils.PathJoin(dir_name, name)
3149 status, es_inst = bdev.ExtStorageFromDisk(name, base_dir=dir_name)
3150 if status:
3151 diagnose = ""
3152 parameters = es_inst.supported_parameters
3153 else:
3154 diagnose = es_inst
3155 parameters = []
3156 result.append((name, es_path, status, diagnose, parameters))
3157
3158 return result
3159
3160
3161 def BlockdevGrow(disk, amount, dryrun, backingstore, excl_stor):
3162 """Grow a stack of block devices.
3163
3164 This function is called recursively, with the childrens being the
3165 first ones to resize.
3166
3167 @type disk: L{objects.Disk}
3168 @param disk: the disk to be grown
3169 @type amount: integer
3170 @param amount: the amount (in mebibytes) to grow with
3171 @type dryrun: boolean
3172 @param dryrun: whether to execute the operation in simulation mode
3173 only, without actually increasing the size
3174 @param backingstore: whether to execute the operation on backing storage
3175 only, or on "logical" storage only; e.g. DRBD is logical storage,
3176 whereas LVM, file, RBD are backing storage
3177 @rtype: (status, result)
3178 @type excl_stor: boolean
3179 @param excl_stor: Whether exclusive_storage is active
3180 @return: a tuple with the status of the operation (True/False), and
3181 the errors message if status is False
3182
3183 """
3184 r_dev = _RecursiveFindBD(disk)
3185 if r_dev is None:
3186 _Fail("Cannot find block device %s", disk)
3187
3188 try:
3189 r_dev.Grow(amount, dryrun, backingstore, excl_stor)
3190 except errors.BlockDeviceError, err:
3191 _Fail("Failed to grow block device: %s", err, exc=True)
3192
3193
def BlockdevSnapshot(disk):
  """Create a snapshot copy of a block device.

  This function is called recursively, and the snapshot is actually created
  just for the leaf lvm backend device.

  @type disk: L{objects.Disk}
  @param disk: the disk to be snapshotted
  @rtype: string
  @return: snapshot disk ID as (vg, lv)
  @raise RPCFail: for unsupported disk types or unfindable devices

  """
  if disk.dev_type == constants.DT_DRBD8:
    # DRBD: snapshot the (single) backing storage child instead
    if not disk.children:
      _Fail("DRBD device '%s' without backing storage cannot be snapshotted",
            disk.unique_id)
    return BlockdevSnapshot(disk.children[0])

  if disk.dev_type != constants.DT_PLAIN:
    _Fail("Cannot snapshot non-lvm block device '%s' of type '%s'",
          disk.logical_id, disk.dev_type)

  found = _RecursiveFindBD(disk)
  if found is None:
    _Fail("Cannot find block device %s", disk)

  # FIXME: choose a saner value for the snapshot size
  # let's stay on the safe side and ask for the full size, for now
  return found.Snapshot(disk.size)
3222
3223
3224 def BlockdevSetInfo(disk, info):
3225 """Sets 'metadata' information on block devices.
3226
3227 This function sets 'info' metadata on block devices. Initial
3228 information is set at device creation; this function should be used
3229 for example after renames.
3230
3231 @type disk: L{objects.Disk}
3232 @param disk: the disk to be grown
3233 @type info: string
3234 @param info: new 'info' metadata
3235 @rtype: (status, result)
3236 @return: a tuple with the status of the operation (True/False), and
3237 the errors message if status is False
3238
3239 """
3240 r_dev = _RecursiveFindBD(disk)
3241 if r_dev is None:
3242 _Fail("Cannot find block device %s", disk)
3243
3244 try:
3245 r_dev.SetInfo(info)
3246 except errors.BlockDeviceError, err:
3247 _Fail("Failed to set information on block device: %s", err, exc=True)
3248
3249
def FinalizeExport(instance, snap_disks):
  """Write out the export configuration information.

  The configuration is written to a "<name>.new" staging directory
  inside the export directory and only moved over the final location
  once fully written.

  @type instance: L{objects.Instance}
  @param instance: the instance which we export, used for
      saving configuration
  @type snap_disks: list of L{objects.Disk}
  @param snap_disks: list of snapshot block devices, which
      will be used to get the actual name of the dump file

  @rtype: None

  """
  # NOTE(review): destdir is assumed to exist already, created earlier
  # in the export flow
  destdir = utils.PathJoin(pathutils.EXPORT_DIR, instance.name + ".new")
  finaldestdir = utils.PathJoin(pathutils.EXPORT_DIR, instance.name)

  config = objects.SerializableConfigParser()

  # export meta-data section
  config.add_section(constants.INISECT_EXP)
  config.set(constants.INISECT_EXP, "version", "0")
  config.set(constants.INISECT_EXP, "timestamp", "%d" % int(time.time()))
  config.set(constants.INISECT_EXP, "source", instance.primary_node)
  config.set(constants.INISECT_EXP, "os", instance.os)
  config.set(constants.INISECT_EXP, "compression", "none")

  # instance data section
  config.add_section(constants.INISECT_INS)
  config.set(constants.INISECT_INS, "name", instance.name)
  config.set(constants.INISECT_INS, "maxmem", "%d" %
             instance.beparams[constants.BE_MAXMEM])
  config.set(constants.INISECT_INS, "minmem", "%d" %
             instance.beparams[constants.BE_MINMEM])
  # "memory" is deprecated, but useful for exporting to old ganeti versions
  config.set(constants.INISECT_INS, "memory", "%d" %
             instance.beparams[constants.BE_MAXMEM])
  config.set(constants.INISECT_INS, "vcpus", "%d" %
             instance.beparams[constants.BE_VCPUS])
  config.set(constants.INISECT_INS, "disk_template", instance.disk_template)
  config.set(constants.INISECT_INS, "hypervisor", instance.hypervisor)
  config.set(constants.INISECT_INS, "tags", " ".join(instance.GetTags()))

  # one group of keys per NIC, indexed by position
  nic_total = 0
  for nic_count, nic in enumerate(instance.nics):
    nic_total += 1
    config.set(constants.INISECT_INS, "nic%d_mac" %
               nic_count, "%s" % nic.mac)
    config.set(constants.INISECT_INS, "nic%d_ip" % nic_count, "%s" % nic.ip)
    config.set(constants.INISECT_INS, "nic%d_network" % nic_count,
               "%s" % nic.network)
    config.set(constants.INISECT_INS, "nic%d_name" % nic_count,
               "%s" % nic.name)
    for param in constants.NICS_PARAMETER_TYPES:
      config.set(constants.INISECT_INS, "nic%d_%s" % (nic_count, param),
                 "%s" % nic.nicparams.get(param, None))
  # TODO: redundant: on load can read nics until it doesn't exist
  config.set(constants.INISECT_INS, "nic_count", "%d" % nic_total)

  # one group of keys per snapshot disk; note that disk indices may
  # have gaps (false-ish entries are skipped but still consume an
  # index), while disk_count only counts the present ones
  disk_total = 0
  for disk_count, disk in enumerate(snap_disks):
    if disk:
      disk_total += 1
      config.set(constants.INISECT_INS, "disk%d_ivname" % disk_count,
                 ("%s" % disk.iv_name))
      config.set(constants.INISECT_INS, "disk%d_dump" % disk_count,
                 ("%s" % disk.logical_id[1]))
      config.set(constants.INISECT_INS, "disk%d_size" % disk_count,
                 ("%d" % disk.size))
      config.set(constants.INISECT_INS, "disk%d_name" % disk_count,
                 "%s" % disk.name)

  config.set(constants.INISECT_INS, "disk_count", "%d" % disk_total)

  # New-style hypervisor/backend parameters

  config.add_section(constants.INISECT_HYP)
  for name, value in instance.hvparams.items():
    # cluster-global hypervisor parameters are not exported
    if name not in constants.HVC_GLOBALS:
      config.set(constants.INISECT_HYP, name, str(value))

  config.add_section(constants.INISECT_BEP)
  for name, value in instance.beparams.items():
    config.set(constants.INISECT_BEP, name, str(value))

  config.add_section(constants.INISECT_OSP)
  for name, value in instance.osparams.items():
    config.set(constants.INISECT_OSP, name, str(value))

  utils.WriteFile(utils.PathJoin(destdir, constants.EXPORT_CONF_FILE),
                  data=config.Dumps())
  # replace any previous export of the same instance with the new one
  shutil.rmtree(finaldestdir, ignore_errors=True)
  shutil.move(destdir, finaldestdir)
3340
3341
def ExportInfo(dest):
  """Get export configuration information.

  @type dest: str
  @param dest: directory containing the export

  @rtype: string
  @return: the serialized form of the config file containing the
      export info
  @raise RPCFail: if the export info file lacks the required sections

  """
  config_path = utils.PathJoin(dest, constants.EXPORT_CONF_FILE)

  cfg = objects.SerializableConfigParser()
  cfg.read(config_path)

  # both the export meta-data and the instance sections must be present
  complete = (cfg.has_section(constants.INISECT_EXP) and
              cfg.has_section(constants.INISECT_INS))
  if not complete:
    _Fail("Export info file doesn't have the required fields")

  return cfg.Dumps()
3363
3364
3365 def ListExports():
3366 """Return a list of exports currently available on this machine.
3367
3368 @rtype: list
3369 @return: list of the exports
3370
3371 """
3372 if os.path.isdir(pathutils.EXPORT_DIR):