Merge branch 'stable-2.13' into stable-2.14
author     Oleg Ponomarev <oponomarev@google.com>
           Wed, 11 Nov 2015 18:01:36 +0000 (19:01 +0100)
committer  Oleg Ponomarev <oponomarev@google.com>
           Wed, 11 Nov 2015 19:15:05 +0000 (20:15 +0100)
* stable-2.13
  Extend timeout for gnt-cluster renew-crypto

* stable-2.12
  Revert "Also consider connection time out a network error"
  Clone lists before modifying
  Make lockConfig call retryable

* stable-2.11
  (no changes)

* stable-2.10
  Remove -X from hspace man page
  Make htools tolerate missing "dtotal" and "dfree" on luxi

Conflicts:
    tools/cfgupgrade
Resolution:
    take the change into lib/tools/cfgupgrade

Signed-off-by: Oleg Ponomarev <oponomarev@google.com>
Reviewed-by: Klaus Aehlig <aehlig@google.com>

186 files changed:
.gitignore
INSTALL
Makefile.am
NEWS
README
cabal/CabalDependenciesMacros.hs [new file with mode: 0644]
cabal/cabal-from-modules.py [new file with mode: 0644]
cabal/ganeti.template.cabal [new file with mode: 0644]
configure.ac
devel/build_chroot
doc/design-2.14.rst [new file with mode: 0644]
doc/design-configlock.rst
doc/design-disks.rst
doc/design-draft.rst
doc/design-file-based-disks-ownership.rst [new file with mode: 0644]
doc/design-location.rst
doc/design-shared-storage.rst
doc/hooks.rst
doc/iallocator.rst
doc/index.rst
doc/security.rst
doc/virtual-cluster.rst
lib/backend.py
lib/bootstrap.py
lib/cli.py
lib/cli_opts.py
lib/client/gnt_backup.py
lib/client/gnt_cluster.py
lib/client/gnt_instance.py
lib/client/gnt_node.py
lib/cmdlib/__init__.py
lib/cmdlib/backup.py
lib/cmdlib/base.py
lib/cmdlib/cluster/__init__.py [new file with mode: 0644]
lib/cmdlib/cluster/verify.py [moved from lib/cmdlib/cluster.py with 55% similarity]
lib/cmdlib/common.py
lib/cmdlib/group.py
lib/cmdlib/instance.py
lib/cmdlib/instance_create.py [new file with mode: 0644]
lib/cmdlib/instance_migration.py
lib/cmdlib/instance_operation.py
lib/cmdlib/instance_query.py
lib/cmdlib/instance_set_params.py [new file with mode: 0644]
lib/cmdlib/instance_storage.py
lib/cmdlib/instance_utils.py
lib/cmdlib/misc.py
lib/cmdlib/network.py
lib/cmdlib/node.py
lib/cmdlib/test.py
lib/config/__init__.py [moved from lib/config.py with 88% similarity]
lib/config/temporary_reservations.py [new file with mode: 0644]
lib/config/utils.py [new file with mode: 0644]
lib/config/verify.py [new file with mode: 0644]
lib/ht.py
lib/hypervisor/hv_kvm/__init__.py
lib/locking.py
lib/masterd/iallocator.py
lib/masterd/instance.py
lib/mcpu.py
lib/objects.py
lib/query.py
lib/rapi/client_utils.py
lib/rpc_defs.py
lib/server/noded.py
lib/storage/base.py
lib/storage/bdev.py
lib/storage/drbd.py
lib/storage/extstorage.py
lib/storage/filestorage.py
lib/storage/gluster.py
lib/tools/burnin.py
lib/tools/cfgupgrade.py [new file with mode: 0644]
lib/utils/__init__.py
lib/utils/retry.py
lib/utils/text.py
man/ganeti-extstorage-interface.rst
man/gnt-backup.rst
man/gnt-filter.rst
man/gnt-instance.rst
man/gnt-node.rst
man/hbal.rst
pylintrc
qa/ganeti-qa.py
qa/qa_instance.py
qa/qa_utils.py
src/Ganeti/BasicTypes.hs
src/Ganeti/Compat.hs
src/Ganeti/Config.hs
src/Ganeti/Constants.hs
src/Ganeti/DataCollectors/InstStatus.hs
src/Ganeti/DataCollectors/Lv.hs
src/Ganeti/DataCollectors/Types.hs
src/Ganeti/HTools/Backend/IAlloc.hs
src/Ganeti/HTools/Backend/Luxi.hs
src/Ganeti/HTools/Backend/Rapi.hs
src/Ganeti/HTools/Backend/Text.hs
src/Ganeti/HTools/Cluster.hs
src/Ganeti/HTools/Instance.hs
src/Ganeti/HTools/Loader.hs
src/Ganeti/HTools/Node.hs
src/Ganeti/HTools/Program/Hbal.hs
src/Ganeti/HTools/Program/Hspace.hs
src/Ganeti/HTools/Program/Hsqueeze.hs
src/Ganeti/HTools/Tags.hs
src/Ganeti/JSON.hs
src/Ganeti/Lens.hs
src/Ganeti/Locking/Locks.hs
src/Ganeti/Logging/WriterLog.hs
src/Ganeti/Objects.hs
src/Ganeti/Objects/Disk.hs [new file with mode: 0644]
src/Ganeti/Objects/Instance.hs [new file with mode: 0644]
src/Ganeti/Objects/Nic.hs [copied from src/Ganeti/JQueue/Lens.hs with 64% similarity]
src/Ganeti/OpCodes.hs
src/Ganeti/OpParams.hs
src/Ganeti/PartialParams.hs [new file with mode: 0644]
src/Ganeti/Query/Common.hs
src/Ganeti/Query/Group.hs
src/Ganeti/Query/Instance.hs
src/Ganeti/Query/Language.hs
src/Ganeti/Query/Network.hs
src/Ganeti/Query/Node.hs
src/Ganeti/Query/Query.hs
src/Ganeti/THH.hs
src/Ganeti/THH/Field.hs
src/Ganeti/THH/HsRPC.hs
src/Ganeti/Types.hs
src/Ganeti/Utils.hs
src/Ganeti/Utils/AsyncWorker.hs
src/Ganeti/WConfd/ConfigModifications.hs [new file with mode: 0644]
src/Ganeti/WConfd/ConfigVerify.hs
src/Ganeti/WConfd/Core.hs
src/Ganeti/WConfd/Monad.hs
src/Ganeti/WConfd/Server.hs
src/Ganeti/WConfd/Ssconf.hs
src/Ganeti/WConfd/TempRes.hs
test/data/cluster_config_2.13.json [copied from test/data/cluster_config_2.12.json with 93% similarity]
test/data/htools/hail-alloc-nlocation.json [new file with mode: 0644]
test/data/htools/hail-reloc-drbd-crowded.json
test/data/htools/hbal-forth.data [new file with mode: 0644]
test/data/htools/hbal-location-1.data [new file with mode: 0644]
test/data/htools/hbal-location-2.data [new file with mode: 0644]
test/data/instance-disks.txt
test/hs/Test/Ganeti/HTools/Backend/Text.hs
test/hs/Test/Ganeti/HTools/ExtLoader.hs
test/hs/Test/Ganeti/HTools/Instance.hs
test/hs/Test/Ganeti/HTools/Node.hs
test/hs/Test/Ganeti/JQScheduler.hs
test/hs/Test/Ganeti/Locking/Locks.hs
test/hs/Test/Ganeti/Objects.hs
test/hs/Test/Ganeti/OpCodes.hs
test/hs/Test/Ganeti/PartialParams.hs [new file with mode: 0644]
test/hs/Test/Ganeti/Query/Instance.hs
test/hs/Test/Ganeti/THH.hs
test/hs/Test/Ganeti/TestCommon.hs
test/hs/Test/Ganeti/TestHTools.hs
test/hs/Test/Ganeti/Utils.hs
test/hs/htest.hs
test/hs/shelltests/htools-balancing.test
test/hs/shelltests/htools-hail.test
test/hs/shelltests/htools-hbal.test
test/py/cfgupgrade_unittest.py
test/py/cmdlib/backup_unittest.py
test/py/cmdlib/cluster_unittest.py
test/py/cmdlib/cmdlib_unittest.py
test/py/cmdlib/instance_migration_unittest.py
test/py/cmdlib/instance_storage_unittest.py
test/py/cmdlib/instance_unittest.py
test/py/cmdlib/node_unittest.py
test/py/cmdlib/testsupport/__init__.py
test/py/cmdlib/testsupport/cmdlib_testcase.py
test/py/cmdlib/testsupport/processor_mock.py
test/py/cmdlib/testsupport/wconfd_mock.py
test/py/ganeti.backend_unittest.py
test/py/ganeti.client.gnt_instance_unittest.py
test/py/ganeti.config_unittest.py
test/py/ganeti.hypervisor.hv_kvm_unittest.py
test/py/ganeti.mcpu_unittest.py
test/py/ganeti.objects_unittest.py
test/py/ganeti.opcodes_unittest.py
test/py/ganeti.query_unittest.py
test/py/ganeti.storage.filestorage_unittest.py
test/py/ganeti.utils_unittest.py
test/py/testutils/__init__.py [moved from test/py/testutils.py with 96% similarity]
test/py/testutils/config_mock.py [moved from test/py/cmdlib/testsupport/config_mock.py with 95% similarity]
tools/cfgupgrade
tools/move-instance

diff --git a/.gitignore b/.gitignore
index 2c03279..e653ffc 100644 (file)
 # /
 /.hsenv
 /Makefile
-/hs-pkg-versions
 /Makefile.ghc
 /Makefile.ghc.bak
 /Makefile.in
 /Makefile.local
 /Session.vim
 /TAGS*
+/apps/
 /aclocal.m4
 /autom4te.cache
 /autotools/install-sh
@@ -39,6 +39,7 @@
 /autotools/py-compile
 /autotools/replace_vars.sed
 /autotools/shell-env-init
+/cabal_macros.h
 /config.log
 /config.status
 /configure
 /devel/squeeze-amd64.conf
 /devel/wheezy-amd64.tar.gz
 /devel/wheezy-amd64.conf
+/dist/
+/empty-cabal-config
 /epydoc.conf
 /ganeti
+/ganeti.cabal
+/ganeti.depsflags
 /stamp-srclinks
 /stamp-directories
 /vcs-version
diff --git a/INSTALL b/INSTALL
index 05b24e2..145b5fc 100644 (file)
--- a/INSTALL
+++ b/INSTALL
@@ -127,6 +127,9 @@ deploy Ganeti on production machines). More specifically:
 - or even better, `The Haskell Platform
   <http://hackage.haskell.org/platform/>`_ which gives you a simple way
   to bootstrap Haskell
+- `cabal-install <http://hackage.haskell.org/package/cabal-install>`_ and
+  `Cabal <http://hackage.haskell.org/package/Cabal>`_, the Common Architecture
+  for Building Haskell Applications and Libraries (executable and library)
 - `json <http://hackage.haskell.org/package/json>`_, a JSON library
 - `network <http://hackage.haskell.org/package/network>`_, a basic
   network library
@@ -163,7 +166,8 @@ deploy Ganeti on production machines). More specifically:
 
 Some of these are also available as package in Debian/Ubuntu::
 
-  $ apt-get install ghc libghc-json-dev libghc-network-dev \
+  $ apt-get install ghc cabal-install libghc-cabal-dev \
+                    libghc-json-dev libghc-network-dev \
                     libghc-parallel-dev \
                     libghc-utf8-string-dev libghc-curl-dev \
                     libghc-hslogger-dev \
@@ -190,36 +194,19 @@ The most recent Fedora doesn't provide ``crypto``, ``inotify``. So these
 need to be installed using ``cabal``.
 
 If using a distribution which does not provide these libraries, first
-install the Haskell platform. You can also install ``cabal`` manually::
+install the Haskell platform. Then run::
 
-  $ apt-get install cabal-install
   $ cabal update
 
 Then install the additional native libraries::
 
   $ apt-get install libpcre3-dev libcurl4-openssl-dev
 
-And finally the libraries required for building the packages (only the
-ones not available in your distribution packages) via ``cabal``::
+And finally the libraries required for building the packages via ``cabal``
+(it will automatically pick only those that are not already installed via your
+distribution packages)::
 
-  $ cabal install json network parallel utf8-string curl hslogger \
-                  Crypto text hinotify==0.3.2 regex-pcre \
-                  attoparsec vector base64-bytestring \
-                  lifted-base==0.2.0.3 lens==3.10
-
-(The specified versions are suitable for Debian Wheezy, for other
-distributions different versions might be needed.)
-
-.. _cabal-order-note:
-.. note::
-  When installing additional libraries using ``cabal``, be sure to first
-  install all the required libraries available in your distribution and
-  only then install the rest using ``cabal``.
-  Otherwise cabal might install different versions of libraries that are
-  available in your distribution, causing conflicts during the
-  compilation.
-  This applies in particular when installing libraries for the optional
-  features.
+  $ cabal install --only-dependencies cabal/ganeti.template.cabal
 
 Haskell optional features
 ~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -242,15 +229,13 @@ either apt::
 
 or ``cabal``::
 
-  $ cabal install snap-server PSQueue
+  $ cabal install --only-dependencies cabal/ganeti.template.cabal \
+                  --flags="confd mond metad"
 
 to install them.
 
 .. _cabal-note:
 .. note::
-  If one of the cabal packages fails to install due to unfulfilled
-  dependencies, you can try enabling symlinks in ``~/.cabal/config``.
-
   Make sure that your ``~/.cabal/bin`` directory (or whatever else
   is defined as ``bindir``) is in your ``PATH``.
 
diff --git a/Makefile.am b/Makefile.am
index a30d82e..d61df4c 100644 (file)
@@ -89,6 +89,8 @@ tools_pythondir = $(versionedsharedir)
 
 clientdir = $(pkgpythondir)/client
 cmdlibdir = $(pkgpythondir)/cmdlib
+cmdlib_clusterdir = $(pkgpythondir)/cmdlib/cluster
+configdir = $(pkgpythondir)/config
 hypervisordir = $(pkgpythondir)/hypervisor
 hypervisor_hv_kvmdir = $(pkgpythondir)/hypervisor/hv_kvm
 jqueuedir = $(pkgpythondir)/jqueue
@@ -185,6 +187,7 @@ HS_DIRS_NOROOT = $(filter-out src,$(filter-out test/hs,$(HS_DIRS)))
 DIRS = \
        $(HS_DIRS) \
        autotools \
+       cabal \
        daemons \
        devel \
        devel/data \
@@ -203,7 +206,9 @@ DIRS = \
        lib/build \
        lib/client \
        lib/cmdlib \
+       lib/cmdlib/cluster \
        lib/confd \
+       lib/config \
        lib/jqueue \
        lib/http \
        lib/hypervisor \
@@ -239,6 +244,7 @@ DIRS = \
         test/data/cgroup_root/devices/some_group/lxc \
         test/data/cgroup_root/devices/some_group/lxc/instance1 \
        test/py \
+       test/py/testutils \
        test/py/cmdlib \
        test/py/cmdlib/testsupport \
        tools
@@ -259,6 +265,8 @@ BUILDTIME_DIR_AUTOCREATE = \
 
 BUILDTIME_DIRS = \
        $(BUILDTIME_DIR_AUTOCREATE) \
+       apps \
+       dist \
        doc/html \
        doc/man-html
 
@@ -294,7 +302,12 @@ CLEANFILES = \
        $(addsuffix /*.o,$(HS_DIRS)) \
        $(addsuffix /*.$(HTEST_SUFFIX)_hi,$(HS_DIRS)) \
        $(addsuffix /*.$(HTEST_SUFFIX)_o,$(HS_DIRS)) \
-       hs-pkg-versions \
+       $(HASKELL_PACKAGE_VERSIONS_FILE) \
+       $(CABAL_EXECUTABLES_APPS_STAMPS) \
+       empty-cabal-config \
+       ganeti.cabal \
+       $(HASKELL_PACKAGE_IDS_FILE) \
+       $(HASKELL_PACKAGE_VERSIONS_FILE) \
        Makefile.ghc \
        Makefile.ghc.bak \
        $(PYTHON_BOOTSTRAP) \
@@ -350,6 +363,8 @@ GENERATED_FILES = \
 
 clean-local:
        rm -rf tools/shebang
+       rm -rf apps
+       rm -rf dist
 
 HS_GENERATED_FILES = $(HS_PROGS) src/hluxid src/ganeti-luxid \
        src/hconfd src/ganeti-confd
@@ -374,10 +389,6 @@ built_python_sources = \
        $(nodist_pkgpython_PYTHON) \
        $(nodist_pkgpython_rpc_stub_PYTHON)
 
-# Generating the RPC wrappers depends on many things, so make sure
-# it's built at the end of the built sources
-lib/_generated_rpc.py: | $(built_base_sources) $(built_python_base_sources)
-
 # these are all built from the underlying %.in sources
 BUILT_EXAMPLES = \
        doc/examples/ganeti-kvm-poweroff.initd \
@@ -428,7 +439,6 @@ pkgpython_PYTHON = \
        lib/cli.py \
        lib/cli_opts.py \
        lib/compat.py \
-       lib/config.py \
        lib/constants.py \
        lib/daemon.py \
        lib/errors.py \
@@ -476,13 +486,14 @@ cmdlib_PYTHON = \
        lib/cmdlib/__init__.py \
        lib/cmdlib/backup.py \
        lib/cmdlib/base.py \
-       lib/cmdlib/cluster.py \
        lib/cmdlib/common.py \
        lib/cmdlib/group.py \
        lib/cmdlib/instance.py \
+       lib/cmdlib/instance_create.py \
        lib/cmdlib/instance_migration.py \
        lib/cmdlib/instance_operation.py \
        lib/cmdlib/instance_query.py \
+       lib/cmdlib/instance_set_params.py \
        lib/cmdlib/instance_storage.py \
        lib/cmdlib/instance_utils.py \
        lib/cmdlib/misc.py \
@@ -493,6 +504,16 @@ cmdlib_PYTHON = \
        lib/cmdlib/tags.py \
        lib/cmdlib/test.py
 
+cmdlib_cluster_PYTHON = \
+       lib/cmdlib/cluster/__init__.py \
+       lib/cmdlib/cluster/verify.py
+
+config_PYTHON = \
+       lib/config/__init__.py \
+       lib/config/verify.py \
+       lib/config/temporary_reservations.py \
+       lib/config/utils.py
+
 hypervisor_PYTHON = \
        lib/hypervisor/__init__.py \
        lib/hypervisor/hv_base.py \
@@ -579,7 +600,8 @@ pytools_PYTHON = \
        lib/tools/node_daemon_setup.py \
        lib/tools/prepare_node_join.py \
        lib/tools/ssh_update.py \
-       lib/tools/ssl_update.py
+       lib/tools/ssl_update.py \
+       lib/tools/cfgupgrade.py
 
 utils_PYTHON = \
        lib/utils/__init__.py \
@@ -622,6 +644,7 @@ docinput = \
        doc/design-2.11.rst \
        doc/design-2.12.rst \
        doc/design-2.13.rst \
+       doc/design-2.14.rst \
        doc/design-autorepair.rst \
        doc/design-bulk-create.rst \
        doc/design-ceph-ganeti-support.rst \
@@ -635,6 +658,7 @@ docinput = \
        doc/design-disk-conversion.rst \
        doc/design-disks.rst \
        doc/design-draft.rst \
+       doc/design-file-based-disks-ownership.rst \
        doc/design-file-based-storage.rst \
        doc/design-glusterfs-ganeti-support.rst \
        doc/design-hotplug.rst \
@@ -761,6 +785,9 @@ HFLAGS = \
        -O -Wall -isrc \
        -fwarn-monomorphism-restriction \
        -fwarn-tabs \
+       -optP-include -optP$(HASKELL_PACKAGE_VERSIONS_FILE) \
+       -hide-all-packages \
+       `cat $(HASKELL_PACKAGE_IDS_FILE)` \
        $(GHC_BYVERSION_FLAGS)
 if DEVELOPER_MODE
 HFLAGS += -Werror
@@ -913,7 +940,10 @@ HS_LIB_SRCS = \
        src/Ganeti/Network.hs \
        src/Ganeti/Objects.hs \
        src/Ganeti/Objects/BitArray.hs \
+       src/Ganeti/Objects/Disk.hs \
+       src/Ganeti/Objects/Instance.hs \
        src/Ganeti/Objects/Lens.hs \
+       src/Ganeti/Objects/Nic.hs \
        src/Ganeti/OpCodes.hs \
        src/Ganeti/OpCodes/Lens.hs \
        src/Ganeti/OpParams.hs \
@@ -936,6 +966,7 @@ HS_LIB_SRCS = \
        src/Ganeti/Query/Query.hs \
        src/Ganeti/Query/Server.hs \
        src/Ganeti/Query/Types.hs \
+       src/Ganeti/PartialParams.hs \
        src/Ganeti/Rpc.hs \
        src/Ganeti/Runtime.hs \
        src/Ganeti/SlotMap.hs \
@@ -970,6 +1001,7 @@ HS_LIB_SRCS = \
        src/Ganeti/Utils/Validate.hs \
        src/Ganeti/VCluster.hs \
        src/Ganeti/WConfd/ConfigState.hs \
+       src/Ganeti/WConfd/ConfigModifications.hs \
        src/Ganeti/WConfd/ConfigVerify.hs \
        src/Ganeti/WConfd/ConfigWriter.hs \
        src/Ganeti/WConfd/Client.hs \
@@ -1038,6 +1070,7 @@ HS_TEST_SRCS = \
        test/hs/Test/Ganeti/Locking/Locks.hs \
        test/hs/Test/Ganeti/Locking/Waiting.hs \
        test/hs/Test/Ganeti/Network.hs \
+       test/hs/Test/Ganeti/PartialParams.hs \
        test/hs/Test/Ganeti/Objects.hs \
        test/hs/Test/Ganeti/Objects/BitArray.hs \
        test/hs/Test/Ganeti/OpCodes.hs \
@@ -1273,41 +1306,15 @@ install-exec-hook:
 
 HS_SRCS = $(HS_LIBTESTBUILT_SRCS)
 
-# select the last line of output and extract the version number,
-# padding with 0s if needed
-hs-pkg-versions:
-       ghc-pkg list --simple-output lens \
-       | sed -r -e '$$!d' \
-         -e 's/^lens-([0-9]+(\.[0-9]+)*)/\1 0 0 0/' \
-         -e 's/\./ /g' -e 's/([0-9]+) *([0-9]+) *([0-9]+) .*/\
-             -DLENS_MAJOR=\1 -DLENS_MINOR=\2 -DLENS_REV=\3/' \
-         -e 's/^\s*//' \
-       > $@
-       ghc-pkg list --simple-output monad-control \
-       | sed -r -e '$$!d' \
-         -e 's/^monad-control-([0-9]+(\.[0-9]+)*)/\1 0 0 0/' \
-         -e 's/\./ /g' -e 's/([0-9]+) *([0-9]+) *([0-9]+) .*/\
-          -DMONAD_CONTROL_MAJOR=\1 -DMONAD_CONTROL_MINOR=\2 -DMONAD_CONTROL_REV=\3/'\
-         -e 's/^\s*//' \
-       >> $@
-       ghc-pkg list --simple-output QuickCheck \
-       | sed -r -e '$$!d' \
-         -e 's/^QuickCheck-([0-9]+(\.[0-9]+)*)/\1 0 0 0/' \
-         -e 's/\./ /g' -e 's/([0-9]+) *([0-9]+) *([0-9]+) .*/\
-          -DQUICKCHECK_MAJOR=\1 -DQUICKCHECK_MINOR=\2 -DQUICKCHECK_REV=\3/'\
-         -e 's/^\s*//' \
-       >> $@
-
 HS_MAKEFILE_GHC_SRCS = $(HS_SRC_PROGS:%=%.hs)
 if WANT_HSTESTS
 HS_MAKEFILE_GHC_SRCS += $(HS_TEST_PROGS:%=%.hs)
 endif
-Makefile.ghc: $(HS_MAKEFILE_GHC_SRCS) Makefile hs-pkg-versions \
+Makefile.ghc: $(HS_MAKEFILE_GHC_SRCS) Makefile $(HASKELL_PACKAGE_VERSIONS_FILE) \
               | $(built_base_sources) $(HS_BUILT_SRCS)
        $(GHC) -M -dep-makefile $@ $(DEP_SUFFIXES) $(HFLAGS) $(HFLAGS_DYNAMIC) \
                -itest/hs \
-         $(shell cat hs-pkg-versions) \
-               $(HS_PARALLEL3) $(HS_REGEX_PCRE) $(HEXTRA_COMBINED) $(HS_MAKEFILE_GHC_SRCS)
+               $(HEXTRA_COMBINED) $(HS_MAKEFILE_GHC_SRCS)
 # Since ghc -M does not generate dependency line for object files, dependencies
 # from a target executable seed object (e.g. src/hluxid.o) to objects which
 # finally will be linked to the target object (e.g. src/Ganeti/Daemon.o) are
@@ -1322,33 +1329,60 @@ Makefile.ghc: $(HS_MAKEFILE_GHC_SRCS) Makefile hs-pkg-versions \
 
 @include_makefile_ghc@
 
+# Contains the package-id flags for the current build: "-package-id" followed
+# by the name and hash of the package, one for each dependency.
+# Obtained from the setup-config using the Cabal API
+# (CabalDependenciesMacros.hs) after `cabal configure`.
+# This file is created along with HASKELL_PACKAGE_VERSIONS_FILE; if you want
+# to depend on it in a rule, depend on HASKELL_PACKAGE_VERSIONS_FILE instead.
+HASKELL_PACKAGE_IDS_FILE = ganeti.depsflags
+
+# Defines the MIN_VERSION_* macros for all Haskell packages used in this
+# compilation.
+# The versions are determined using `cabal configure`, which takes them from
+# the ghc-pkg database.
+# At the moment, we don't support cabal sandboxes, so we use cabal configure
+# with the --user flag.
+# Note: `cabal configure` and CabalDependenciesMacros.hs perform no
+# downloading (only `cabal install` can do that).
+HASKELL_PACKAGE_VERSIONS_FILE = cabal_macros.h
+
+$(HASKELL_PACKAGE_VERSIONS_FILE): Makefile ganeti.cabal \
+                                  cabal/CabalDependenciesMacros.hs
+       touch empty-cabal-config
+       $(CABAL) --config-file=empty-cabal-config configure --user \
+         -f`test $(HTEST) == yes && echo "htest" || echo "-htest"` \
+         -f`test $(ENABLE_MOND) == True && echo "mond" || echo "-mond"` \
+         -f`test $(ENABLE_METADATA) == True && echo "metad" || echo "-metad"`
+       runhaskell $(abs_top_srcdir)/cabal/CabalDependenciesMacros.hs \
+         ganeti.cabal \
+         $(HASKELL_PACKAGE_IDS_FILE) \
+         $(HASKELL_PACKAGE_VERSIONS_FILE)
+
 # Like the %.o rule, but allows access to the test/hs directory.
 # This uses HFLAGS instead of HTEST_FLAGS because it's only for generating
 # object files (.o for GHC <= 7.6, .o/.so for newer GHCs) that are loaded
 # in GHCI when evaluating TH. The actual test-with-coverage .hpc_o files
 # are created in the `%.$(HTEST_SUFFIX)_o` rule.
-test/hs/%.o: hs-pkg-versions
-       @echo '[GHC|test]: $@ <- $^'
-       @$(GHC) -c $(HFLAGS) $(HFLAGS_DYNAMIC) -itest/hs \
-         $(shell cat hs-pkg-versions) \
-               $(HS_PARALLEL3) $(HS_REGEX_PCRE) $(HEXTRA_COMBINED) $(@:%.o=%.hs)
+test/hs/%.o: $(HASKELL_PACKAGE_VERSIONS_FILE)
+       @echo '[GHC|test]: $@ <- test/hs/$^'
+       @$(GHC) -c $(HFLAGS) -itest/hs $(HFLAGS_DYNAMIC) \
+               $(HEXTRA_COMBINED) $(@:%.o=%.hs)
 
-%.o: hs-pkg-versions
+%.o: $(HASKELL_PACKAGE_VERSIONS_FILE)
        @echo '[GHC]: $@ <- $^'
        @$(GHC) -c $(HFLAGS) $(HFLAGS_DYNAMIC) \
-         $(shell cat hs-pkg-versions) \
-               $(HS_PARALLEL3) $(HS_REGEX_PCRE) $(HEXTRA_COMBINED) $(@:%.o=%.hs)
+               $(HEXTRA_COMBINED) $(@:%.o=%.hs)
 
 # For TH+profiling we need to compile twice: Once without profiling,
 # and then once with profiling. See
 # http://www.haskell.org/ghc/docs/7.0.4/html/users_guide/template-haskell.html#id636646
 if HPROFILE
-%.$(HPROF_SUFFIX)_o: %.o hs-pkg-versions
+%.$(HPROF_SUFFIX)_o: %.o
        @echo '[GHC|prof]: $@ <- $^'
        @$(GHC) -c $(HFLAGS) \
-         $(shell cat hs-pkg-versions) \
          $(HPROFFLAGS) \
-               $(HS_PARALLEL3) $(HS_REGEX_PCRE) $(HEXTRA_COMBINED) \
+               $(HEXTRA_COMBINED) \
                $(@:%.$(HPROF_SUFFIX)_o=%.hs)
 endif
 
@@ -1356,11 +1390,10 @@ endif
 # file for GHC > 7.6 ghci dynamic loading for TH, and creating the .o file
 # will create the .so file since we use -dynamic-too (using the `test/hs/%.o`
 # rule).
-%.$(HTEST_SUFFIX)_o: %.o hs-pkg-versions
+%.$(HTEST_SUFFIX)_o: %.o
        @echo '[GHC|test]: $@ <- $^'
        @$(GHC) -c $(HTEST_FLAGS) \
-         $(shell cat hs-pkg-versions) \
-               $(HS_PARALLEL3) $(HS_REGEX_PCRE) $(HEXTRA_COMBINED) $(@:%.$(HTEST_SUFFIX)_o=%.hs)
+               $(HEXTRA_COMBINED) $(@:%.$(HTEST_SUFFIX)_o=%.hs)
 
 %.hi: %.o ;
 %.$(HTEST_SUFFIX)_hi: %.$(HTEST_SUFFIX)_o ;
@@ -1370,19 +1403,17 @@ if HPROFILE
 $(HS_SRC_PROGS): %: %.$(HPROF_SUFFIX)_o | stamp-directories
        @echo '[GHC-link]: $@'
        $(GHC) $(HFLAGS) $(HPROFFLAGS) \
-               $(HS_PARALLEL3) $(HS_REGEX_PCRE) $(HEXTRA_COMBINED) --make $(@:%=%.hs)
+               $(HEXTRA_COMBINED) --make $(@:%=%.hs)
 else
-$(HS_SRC_PROGS): %: %.o hs-pkg-versions | stamp-directories
-endif
+$(HS_SRC_PROGS): %: %.o | stamp-directories
        @echo '[GHC-link]: $@'
        $(GHC) $(HFLAGS) $(HFLAGS_DYNAMIC) \
-         $(shell cat hs-pkg-versions) \
-               $(HPROFFLAGS) \
-               $(HS_PARALLEL3) $(HS_REGEX_PCRE) $(HEXTRA_COMBINED) --make $(@:%=%.hs)
+               $(HEXTRA_COMBINED) --make $(@:%=%.hs)
+endif
        @rm -f $(notdir $@).tix
        @touch "$@"
 
-$(HS_TEST_PROGS): %: %.$(HTEST_SUFFIX)_o hs-pkg-versions \
+$(HS_TEST_PROGS): %: %.$(HTEST_SUFFIX)_o \
                           | stamp-directories $(built_python_sources)
        @if [ "$(HS_NODEV)" ]; then \
          echo "Error: cannot run unittests without the development" \
@@ -1391,8 +1422,7 @@ $(HS_TEST_PROGS): %: %.$(HTEST_SUFFIX)_o hs-pkg-versions \
        fi
        @echo '[GHC-link|test]: $@'
        $(GHC) $(HTEST_FLAGS) \
-         $(shell cat hs-pkg-versions) \
-               $(HS_PARALLEL3) $(HS_REGEX_PCRE) $(HEXTRA_COMBINED) --make $(@:%=%.hs)
+               $(HEXTRA_COMBINED) --make $(@:%=%.hs)
        @rm -f $(notdir $@).tix
        @touch "$@"
 
@@ -1523,6 +1553,7 @@ EXTRA_DIST += \
        autotools/sphinx-wrapper \
        autotools/testrunner \
        autotools/wrong-hardcoded-paths \
+       cabal/cabal-from-modules.py \
        $(RUN_IN_TEMPDIR) \
        daemons/daemon-util.in \
        daemons/ganeti-cleaner.in \
@@ -1553,9 +1584,13 @@ EXTRA_DIST += \
        doc/users/groupmemberships.in \
        doc/users/groups.in \
        doc/users/users.in \
+       ganeti.cabal \
+       cabal/ganeti.template.cabal \
+       cabal/CabalDependenciesMacros.hs \
        $(dist_TESTS) \
        $(TEST_FILES) \
        $(python_test_support) \
+       $(python_test_utils) \
        man/footer.rst \
        $(manrst) \
        $(maninput) \
@@ -1629,6 +1664,7 @@ TEST_FILES = \
        test/data/htools/hail-alloc-invalid-network.json \
        test/data/htools/hail-alloc-invalid-twodisks.json \
        test/data/htools/hail-alloc-restricted-network.json \
+       test/data/htools/hail-alloc-nlocation.json \
        test/data/htools/hail-alloc-plain-tags.json \
        test/data/htools/hail-alloc-spindles.json \
        test/data/htools/hail-alloc-twodisks.json \
@@ -1641,6 +1677,9 @@ TEST_FILES = \
        test/data/htools/hbal-dyn.data \
        test/data/htools/hbal-evac.data \
        test/data/htools/hbal-excl-tags.data \
+       test/data/htools/hbal-forth.data \
+       test/data/htools/hbal-location-1.data \
+       test/data/htools/hbal-location-2.data \
        test/data/htools/hbal-migration-1.data \
        test/data/htools/hbal-migration-2.data \
        test/data/htools/hbal-migration-3.data \
@@ -1673,6 +1712,7 @@ TEST_FILES = \
        test/data/htools/hsqueeze-overutilized.data \
        test/data/htools/hsqueeze-underutilized.data \
        test/data/htools/unique-reboot-order.data \
+       test/data/mond-data.txt \
        test/hs/shelltests/htools-balancing.test \
        test/hs/shelltests/htools-basic.test \
        test/hs/shelltests/htools-dynutil.test \
@@ -1720,6 +1760,7 @@ TEST_FILES = \
        test/data/cluster_config_2.10.json \
        test/data/cluster_config_2.11.json \
        test/data/cluster_config_2.12.json \
+       test/data/cluster_config_2.13.json \
        test/data/instance-minor-pairing.txt \
        test/data/instance-disks.txt \
        test/data/ip-addr-show-dummy0.txt \
@@ -1899,13 +1940,13 @@ python_tests = \
 python_test_support = \
        test/py/__init__.py \
        test/py/lockperf.py \
-       test/py/testutils.py \
        test/py/testutils_ssh.py \
        test/py/mocks.py \
+       test/py/testutils/__init__.py \
+       test/py/testutils/config_mock.py \
        test/py/cmdlib/__init__.py \
        test/py/cmdlib/testsupport/__init__.py \
        test/py/cmdlib/testsupport/cmdlib_testcase.py \
-       test/py/cmdlib/testsupport/config_mock.py \
        test/py/cmdlib/testsupport/iallocator_mock.py \
        test/py/cmdlib/testsupport/livelock_mock.py \
        test/py/cmdlib/testsupport/netutils_mock.py \
@@ -1968,6 +2009,8 @@ all_python_code = \
        $(pkgpython_PYTHON) \
        $(client_PYTHON) \
        $(cmdlib_PYTHON) \
+       $(cmdlib_cluster_PYTHON) \
+       $(config_PYTHON) \
        $(hypervisor_PYTHON) \
        $(hypervisor_hv_kvm_PYTHON) \
        $(jqueue_PYTHON) \
@@ -1989,6 +2032,7 @@ all_python_code = \
 if PY_UNIT
 all_python_code += $(python_tests)
 all_python_code += $(python_test_support)
+all_python_code += $(python_test_utils)
 endif
 
 srclink_files = \
@@ -2043,6 +2087,7 @@ pep8_python_code = \
        $(gnt_python_sbin_SCRIPTS) \
        qa \
-       $(python_test_support)
+       $(python_test_support) \
+       $(python_test_utils)
 
 test/py/daemon-util_unittest.bash: daemons/daemon-util
 
@@ -2329,7 +2374,9 @@ lib/opcodes.py: Makefile src/hs2py lib/opcodes.py.in_before \
        src/hs2py --opcodes >> $@
        cat $(abs_top_srcdir)/lib/opcodes.py.in_after >> $@
 
-lib/_generated_rpc.py: lib/rpc_defs.py $(BUILD_RPC)
+# Generating the RPC wrappers depends on many things, so make sure
+# it's built at the end of the built sources
+lib/_generated_rpc.py: lib/rpc_defs.py $(BUILD_RPC) | $(built_base_sources) $(built_python_base_sources)
        PYTHONPATH=. $(RUN_IN_TEMPDIR) $(CURDIR)/$(BUILD_RPC) lib/rpc_defs.py > $@
 
 lib/rpc/stub/wconfd.py: Makefile src/hs2py | stamp-directories
@@ -2619,7 +2666,7 @@ pylint-test: $(GENERATED_FILES)
        @test -n "$(PYLINT)" || { echo 'pylint' not found during configure; exit 1; }
        cd $(top_srcdir) && \
                PYTHONPATH=.:./test/py $(PYLINT) $(LINT_OPTS_ALL) \
-               --rcfile=pylintrc-test  $(python_test_support)
+               --rcfile=pylintrc-test  $(python_test_support) $(python_test_utils)
 
 .PHONY: pep8
 pep8: $(GENERATED_FILES)
@@ -2640,7 +2687,9 @@ hlint: $(HS_BUILT_SRCS) src/lint-hints.hs
          --ignore "Use &&" \
          --ignore "Use void" \
          --ignore "Reduce duplication" \
+         --ignore "Use import/export shortcut" \
          --hint src/lint-hints \
+         --cpp-file=$(HASKELL_PACKAGE_VERSIONS_FILE) \
          $(filter-out $(HLINT_EXCLUDES),$(HS_LIBTEST_SRCS) $(HS_PROG_SRCS))
        @if [ ! -f doc/hs-lint.html ]; then \
          echo "All good" > doc/hs-lint.html; \
@@ -2776,12 +2825,7 @@ $(APIDOC_HS_DIR)/index.html: $(HS_LIBTESTBUILT_SRCS) Makefile
        set -e ; \
        export LC_ALL=en_US.UTF-8; \
        OPTGHC="--optghc=-isrc --optghc=-itest/hs"; \
-       if [ "$(HS_PARALLEL3)" ]; \
-       then OPTGHC="$$OPTGHC --optghc=$(HS_PARALLEL3)"; \
-       fi; \
-       if [ "$(HS_REGEX_PCRE)" ]; \
-       then OPTGHC="$$OPTGHC --optghc=$(HS_REGEX_PCRE)"; \
-       fi; \
+       OPTGHC="$$OPTGHC --optghc=-optP-include --optghc=-optP$(HASKELL_PACKAGE_VERSIONS_FILE)"; \
        for file in $(HS_LIBTESTBUILT_SRCS); do \
          f_nosrc=$${file##src/}; \
          f_notst=$${f_nosrc##test/hs/}; \
@@ -2796,15 +2840,13 @@ $(APIDOC_HS_DIR)/index.html: $(HS_LIBTESTBUILT_SRCS) Makefile
          $(HS_LIBTESTBUILT_SRCS)
 
 .PHONY: TAGS
-TAGS: $(GENERATED_FILES) hs-pkg-versions
+TAGS: $(GENERATED_FILES)
        rm -f TAGS
        $(GHC) -e ":etags TAGS_hs" -v0 \
          $(filter-out -O -Werror,$(HFLAGS)) \
-         $(shell cat hs-pkg-versions) \
                -osuf tags.o \
                -hisuf tags.hi \
     -lcurl \
-         $(HS_PARALLEL3) $(HS_REGEX_PCRE) \
          $(HS_LIBTEST_SRCS)
        find . -path './lib/*.py' -o -path './scripts/gnt-*' -o \
          -path './daemons/ganeti-*' -o -path './tools/*' -o \
@@ -2884,6 +2926,39 @@ gitignore-check:
 .PHONY: man
 man: $(man_MANS) $(manhtml)
 
+CABAL_EXECUTABLES = $(HS_DEFAULT_PROGS)
+CABAL_EXECUTABLES_HS = $(patsubst %,%.hs,$(CABAL_EXECUTABLES))
+CABAL_EXECUTABLES_APPS_STAMPS = $(patsubst src/%,apps/%.hs.stamp,$(patsubst test/hs/%,apps/%.hs.stamp,$(CABAL_EXECUTABLES)))
+
+# Executable symlinks
+apps/%.hs.stamp: Makefile
+       mkdir -p apps
+       rm -f $(basename $@)
+       ln -s ../$(filter %/$(basename $(notdir $@)),$(CABAL_EXECUTABLES_HS)) $(basename $@)
+       touch $@
+
+# Builds the cabal file
+ganeti.cabal: cabal/ganeti.template.cabal Makefile cabal/cabal-from-modules.py $(CABAL_EXECUTABLES_APPS_STAMPS)
+       @echo $(subst /,.,$(patsubst %.hs,%,$(patsubst test/hs/%,%,$(patsubst src/%,%,$(HS_SRCS))))) \
+         | python $(abs_top_srcdir)/cabal/cabal-from-modules.py $(abs_top_srcdir)/cabal/ganeti.template.cabal > $@
+
+       for p in $(CABAL_EXECUTABLES); do \
+         echo                                   >> $@; \
+         echo "executable `basename $$p`"       >> $@; \
+         echo "  hs-source-dirs: apps"          >> $@; \
+         echo "  main-is: `basename $$p`.hs"    >> $@; \
+         echo "  default-language: Haskell2010" >> $@; \
+         echo "  build-depends:"                >> $@; \
+         echo "      base"                      >> $@; \
+         echo "    , ganeti"                    >> $@; \
+         if [ $$p == test/hs/htest ]; then \
+           echo "    , hslogger"                  >> $@; \
+           echo "    , test-framework"            >> $@; \
+         elif [ $$p == src/rpc-test ]; then \
+           echo "    , json"                      >> $@; \
+         fi \
+       done
+
 # Target that builds all binaries (including those that are not
 # rebuilt except when running the tests)
 .PHONY: really-all
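
For orientation, the two files generated by the
$(HASKELL_PACKAGE_VERSIONS_FILE) rule above have roughly the following shape;
this is an illustrative sketch only, as the concrete versions and hashes
depend on the local ghc-pkg database:

  $ cat ganeti.depsflags        # one -package-id flag per dependency
  -package-id base-4.5.1.0-<hash> -package-id json-0.7-<hash> ...

  $ grep -A 3 'MIN_VERSION_lens' cabal_macros.h
  #define MIN_VERSION_lens(major1,major2,minor) (\
    (major1) <  3 || \
    (major1) == 3 && (major2) <  10 || \
    (major1) == 3 && (major2) == 10 && (minor) <= 2)

The MIN_VERSION_* macros replace the removed hs-pkg-versions mechanism:
sources compiled with "-optP-include -optP$(HASKELL_PACKAGE_VERSIONS_FILE)"
can guard version-dependent code with CPP, e.g. "#if MIN_VERSION_lens(3,10,0)",
instead of the old -DLENS_MAJOR/-DLENS_MINOR defines.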
diff --git a/NEWS b/NEWS
index d094086..c6f0739 100644 (file)
--- a/NEWS
+++ b/NEWS
@@ -2,6 +2,154 @@ News
 ====
 
 
+Version 2.14.1
+--------------
+
+*(Released Fri, 10 Jul 2015)*
+
+Incompatible/important changes
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- The SSH security changes reduced the number of nodes which can SSH into
+  other nodes. Unfortunately, the Ganeti implementation of migration for the
+  xl stack of Xen required SSH to be able to migrate the instance,
+  leading to a situation where full movement of an instance around the cluster
+  was not possible. This version fixes the issue by using socat to transfer
+  instance data. While socat is less secure than SSH, it is about as secure as
+  xm migrations, and occurs over the secondary network if present. As a
+  consequence of this change, Xen instance migrations using xl cannot occur
+  between nodes running 2.14.0 and 2.14.1.
+- This release contains a fix for the problem that different encodings in
+  SSL certificates can break RPC communication (issue 1094). The fix makes
+  it necessary to rerun 'gnt-cluster renew-crypto --new-node-certificates'
+  after the cluster is fully upgraded to 2.14.1.
+
+Other Changes
+~~~~~~~~~~~~~
+
+- The ``htools`` now also work properly on shared-storage clusters.
+- Instance moves now also work properly for the plain disk template.
+- Filter evaluation for run-time data filters was fixed (issue 1100).
+- Various improvements to the documentation have been added.
+
+
+Version 2.14.0
+--------------
+
+*(Released Tue, 2 Jun 2015)*
+
+New features
+~~~~~~~~~~~~
+
+- The build system now enforces that external Haskell dependencies lie in
+  a supported range, as declared by our new ganeti.cabal file.
+- Basic support for instance reservations has been added. Instance addition
+  supports a --forthcoming option telling Ganeti to only reserve the resources
+  but not create the actual instance. The instance can later be created by
+  passing the --commit option to the instance addition command.
+- Node tags starting with htools:nlocation: now have a special meaning to
+  htools(1). They control between which nodes migration is possible, e.g.,
+  during hypervisor upgrades. See hbal(1) for details.
+- The node-allocation lock has been removed for good, thus speeding up
+  parallel instance allocation and creation.
+- The external storage interface has been extended by optional ``open``
+  and ``close`` scripts.
+
+New dependencies
+~~~~~~~~~~~~~~~~
+
+- Building the Haskell part of Ganeti now requires Cabal and cabal-install.
+
+Known issues
+~~~~~~~~~~~~
+
+- Under certain conditions an instance doesn't get unpaused after live
+  migration (issue #1050).
+
+Since 2.14.0 rc2
+~~~~~~~~~~~~~~~~
+
+- The call to the IAllocator in 'gnt-node evacuate' has been fixed.
+- In opportunistic locking, only ask for those node resource locks where
+  the node lock is held.
+- Lock requests are repeatable now; this avoids failure of a job in a
+  race condition with a signal sent to the job.
+- Various improvements to the QA.
+
+
+Version 2.14.0 rc2
+------------------
+
+*(Released Tue, 19 May 2015)*
+
+This was the second release candidate in the 2.14 series. All important
+changes are listed in the 2.14.0 entry.
+
+Since 2.14.0 rc1
+~~~~~~~~~~~~~~~~
+
+- Private parameters are now properly exported to instance create scripts.
+- Unnecessary config unlocks and upgrades have been removed, improving
+  performance, in particular of cluster verification.
+- Some rarely occurring file-descriptor leaks have been fixed.
+- The checks for orphan and lost volumes have been fixed to also work
+  correctly when multiple volume groups are used.
+
+
+Version 2.14.0 rc1
+------------------
+
+*(Released Wed, 29 Apr 2015)*
+
+This was the first release candidate in the 2.14 series. All important
+changes are listed in the latest 2.14 entry.
+
+Since 2.14.0 beta2
+~~~~~~~~~~~~~~~~~~
+
+The following issue has been fixed:
+
+- A race condition where a badly timed kill of WConfD could lead to
+  an incorrect configuration.
+
+Fixes inherited from the 2.12 branch:
+
+- Upgrade from old versions (2.5 and 2.6) was failing (issues 1070, 1019).
+- gnt-network info outputs wrong external reservations (issue 1068).
+- Refuse to demote master from master capability (issue 1023).
+
+Fixes inherited from the 2.13 branch:
+
+- Bugs related to SSH-key handling of master candidates (issues 1045, 1046, 1047).
+
+
+Version 2.14.0 beta2
+--------------------
+
+*(Released Thu, 26 Mar 2015)*
+
+This was the second beta release in the 2.14 series. All important changes
+are listed in the latest 2.14 entry.
+
+Since 2.14.0 beta1
+~~~~~~~~~~~~~~~~~~
+
+The following issues have been fixed:
+
+- Issue 1018: Cluster init (and possibly other jobs) occasionally fails to start.
+
+The extension of the external storage interface was not present in 2.14.0 beta1.
+
+
+Version 2.14.0 beta1
+--------------------
+
+*(Released Fri, 13 Feb 2015)*
+
+This was the first beta release of the 2.14 series. All important changes
+are listed in the latest 2.14 entry.
+
+
 Version 2.13.2
 --------------
 
diff --git a/README b/README
index e2898ad..345ef2d 100644 (file)
--- a/README
+++ b/README
@@ -1,4 +1,4 @@
-Ganeti 2.13
+Ganeti 2.14
 ===========
 
 For installation instructions, read the INSTALL and the doc/install.rst
diff --git a/cabal/CabalDependenciesMacros.hs b/cabal/CabalDependenciesMacros.hs
new file mode 100644 (file)
index 0000000..e07def7
--- /dev/null
@@ -0,0 +1,38 @@
+module Main where
+
+import Control.Applicative
+import qualified Data.Set as Set
+import qualified Distribution.Simple.Build.Macros as Macros
+import Distribution.Simple.Configure (maybeGetPersistBuildConfig)
+import Distribution.Simple.LocalBuildInfo (externalPackageDeps)
+import Distribution.PackageDescription (packageDescription)
+import Distribution.PackageDescription.Parse (readPackageDescription)
+import Distribution.Text (display)
+import Distribution.Verbosity (normal)
+import System.Environment (getArgs)
+
+
+main :: IO ()
+main = do
+  -- Get paths from program arguments.
+  (cabalPath, depsPath, macrosPath) <- do
+    args <- getArgs
+    case args of
+      [c, d, m] -> return (c, d, m)
+      _         -> error "Expected 3 arguments: cabalPath depsPath macrosPath"
+
+  -- Read the cabal file.
+  pkgDesc <- packageDescription <$> readPackageDescription normal cabalPath
+
+  -- Read the setup-config.
+  m'conf <- maybeGetPersistBuildConfig "dist"
+  case m'conf of
+    Nothing -> error "could not read dist/setup-config"
+    Just conf -> do
+
+      -- Write package dependencies.
+      let deps = map (display . fst) $ externalPackageDeps conf
+      writeFile depsPath (unwords $ map ("-package-id " ++) deps)
+
+      -- Write package MIN_VERSION_* macros.
+      writeFile macrosPath $ Macros.generate pkgDesc conf
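
Usage sketch: as wired up in Makefile.am, this helper runs after a plain
`cabal configure` against the generated ganeti.cabal and reads the resulting
dist/setup-config; a manual invocation would look roughly like this:

  $ cabal --config-file=empty-cabal-config configure --user
  $ runhaskell cabal/CabalDependenciesMacros.hs \
      ganeti.cabal ganeti.depsflags cabal_macros.h

This writes the -package-id flags to ganeti.depsflags and the MIN_VERSION_*
macros to cabal_macros.h.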
diff --git a/cabal/cabal-from-modules.py b/cabal/cabal-from-modules.py
new file mode 100644 (file)
index 0000000..719291d
--- /dev/null
@@ -0,0 +1,8 @@
+import sys
+
+cabal_in = sys.argv[1]
+
+modules = '\n    '.join(sorted(sys.stdin.read().split()))
+template = open(cabal_in).read()
+contents = template.replace('-- AUTOGENERATED_MODULES_HERE', modules)
+sys.stdout.write(contents)
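
Usage sketch, mirroring the ganeti.cabal rule in Makefile.am: the template
path is the script's only argument and the module names arrive on stdin (the
module list below is purely illustrative):

  $ echo "Ganeti.BasicTypes Ganeti.Config" \
      | python cabal/cabal-from-modules.py cabal/ganeti.template.cabal \
      > ganeti.cabal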
diff --git a/cabal/ganeti.template.cabal b/cabal/ganeti.template.cabal
new file mode 100644 (file)
index 0000000..3813087
--- /dev/null
@@ -0,0 +1,107 @@
+name:                ganeti
+version:             2.14
+homepage:            http://www.ganeti.org
+license:             BSD2
+license-file:        COPYING
+author:              Google Inc.
+maintainer:          ganeti-devel@googlegroups.com
+copyright:           2006-2015 Google Inc.
+category:            System
+build-type:          Simple
+extra-source-files:  README
+cabal-version:       >=1.10
+synopsis:            Cluster-based virtualization management software
+description:
+  Cluster-based virtualization management software
+  .
+  See <http://www.ganeti.org>
+
+
+flag mond
+  description: enable the ganeti monitoring daemon
+  default:     True
+
+flag metad
+  description: enable the ganeti metadata daemon
+  default:     True
+
+flag htest
+  description: enable tests
+  default:     True
+
+
+library
+  exposed-modules:
+    -- AUTOGENERATED_MODULES_HERE
+  -- other-modules:
+  other-extensions:
+      TemplateHaskell
+  build-depends:
+      base                          >= 4.5.0.0
+    , array                         >= 0.4.0.0
+    , bytestring                    >= 0.9.2.1
+    , containers                    >= 0.4.2.1
+    , deepseq                       >= 1.3.0.0
+    , directory                     >= 1.1.0.2
+    , filepath                      >= 1.3.0.0
+    , mtl                           >= 2.1.1
+    , old-time                      >= 1.1.0.0
+    , pretty                        >= 1.1.1.0
+    , process                       >= 1.1.0.1
+    , random                        >= 1.0.1.1
+    , template-haskell              >= 2.7.0.0
+    , text                          >= 0.11.1.13
+    , transformers                  >= 0.3.0.0
+    , unix                          >= 2.5.1.0
+
+    , attoparsec                    >= 0.10.1.1   && < 0.13
+    , base64-bytestring             >= 1.0.0.1    && < 1.1
+    , Crypto                        >= 4.2.4      && < 4.3
+    , curl                          >= 1.3.7      && < 1.4
+    , hinotify                      >= 0.3.2      && < 0.4
+    , hslogger                      >= 1.1.4      && < 1.3
+    , json                          >= 0.5        && < 0.9
+    , lens                          >= 3.10       && < 4.8
+    , lifted-base                   >= 0.2.0.3    && < 0.3
+    , monad-control                 >= 0.3.1.3    && < 1.1
+    , MonadCatchIO-transformers     >= 0.3.0.0    && < 0.4
+    , network                       >= 2.3.0.13   && < 2.7
+    , parallel                      >= 3.2.0.2    && < 3.3
+    , regex-pcre                    >= 0.94.2     && < 0.95
+    , temporary                     >= 1.1.2.3    && < 1.3
+    , transformers-base             >= 0.4.1      && < 0.5
+    , utf8-string                   >= 0.3.7      && < 0.4
+    , zlib                          >= 0.5.3.3    && < 0.6
+
+    -- Executables:
+    -- , happy
+    -- , hscolour
+    -- , shelltestrunner
+
+  if flag(htest)
+    build-depends:
+        HUnit                         >= 1.2.4.2    && < 1.3
+      , QuickCheck                    >= 2.4.2      && < 2.8
+      , test-framework                >= 0.6        && < 0.9
+      , test-framework-hunit          >= 0.2.7      && < 0.4
+      , test-framework-quickcheck2    >= 0.2.12.1   && < 0.4
+
+  if flag(mond)
+    build-depends:
+        PSQueue                       >= 1.1        && < 1.2
+      , snap-core                     >= 0.8.1      && < 0.10
+      , snap-server                   >= 0.8.1      && < 0.10
+
+  if flag(metad)
+    build-depends:
+        snap-core                     >= 0.8.1      && < 0.10
+      , snap-server                   >= 0.8.1      && < 0.10
+
+  hs-source-dirs:
+    src, test/hs
+  build-tools:
+    hsc2hs
+  default-language:
+    Haskell2010
+  ghc-options:
+    -Wall
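
Note: the mond, metad and htest flags defined above are what the build system
toggles via `cabal configure -f...`; to resolve dependencies for a build
without the optional daemons, one could for instance run:

  $ cabal install --only-dependencies cabal/ganeti.template.cabal \
                  --flags="-mond -metad"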
diff --git a/configure.ac b/configure.ac
index c7186ad..42d458f 100644 (file)
@@ -1,7 +1,7 @@
 # Configure script for Ganeti
 m4_define([gnt_version_major], [2])
-m4_define([gnt_version_minor], [13])
-m4_define([gnt_version_revision], [2])
+m4_define([gnt_version_minor], [14])
+m4_define([gnt_version_revision], [1])
 m4_define([gnt_version_suffix], [])
 m4_define([gnt_version_full],
           m4_format([%d.%d.%d%s],
@@ -654,14 +654,15 @@ if test -z "$GHC_PKG"; then
   AC_MSG_FAILURE([ghc-pkg not found, compilation will not be possible])
 fi
 
-# check for modules, first custom/special checks
-AC_MSG_NOTICE([checking for required haskell modules])
-HS_PARALLEL3=
-AC_GHC_PKG_CHECK([parallel-3.*], [HS_PARALLEL3=-DPARALLEL3],
-                 [AC_GHC_PKG_REQUIRE(parallel)], t)
-AC_SUBST(HS_PARALLEL3)
+# Check for cabal
+AC_ARG_VAR(CABAL, [cabal path])
+AC_PATH_PROG(CABAL, [cabal], [])
+if test -z "$CABAL"; then
+  AC_MSG_FAILURE([cabal not found, compilation will not be possible])
+fi
 
-# and now standard modules
+# check for standard modules
+AC_GHC_PKG_REQUIRE(Cabal)
 AC_GHC_PKG_REQUIRE(curl)
 AC_GHC_PKG_REQUIRE(json)
 AC_GHC_PKG_REQUIRE(network)
@@ -741,7 +742,6 @@ if test "$enable_metadata" != no; then
                                  $METAD_PKG]))
   fi
 fi
-AC_SUBST(HS_REGEX_PCRE)
 if test "$has_metad" = True; then
   AC_MSG_NOTICE([Enabling metadata usage])
 fi
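
Since CABAL is declared with AC_ARG_VAR, a non-default cabal binary can be
supplied directly at configure time (the path below is illustrative):

  $ ./configure CABAL=/opt/haskell/bin/cabal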
diff --git a/devel/build_chroot b/devel/build_chroot
index c78fbcb..f1560c1 100755 (executable)
@@ -138,7 +138,7 @@ debootstrap --arch $ARCH $DIST_RELEASE $CHDIR
 
 APT_INSTALL="apt-get install -y --no-install-recommends"
 
-if [ DIST_RELEASE = squeeze ]
+if [ $DIST_RELEASE = squeeze ]
 then
   echo "deb http://backports.debian.org/debian-backports" \
        "$DIST_RELEASE-backports main contrib non-free" \
@@ -173,7 +173,7 @@ function lookup_sha1 {
 function download {
   local FNAME="$1"
   local URL="$2"
-  in_chroot -- wget --output-document="$FNAME" "$URL"
+  in_chroot -- wget --no-check-certificate --output-document="$FNAME" "$URL"
   verify_sha1 "$FNAME" "$( lookup_sha1 "$URL" )"
 }
 
@@ -229,7 +229,7 @@ case $DIST_RELEASE in
         libcurl4-gnutls-dev \
         libpcre3-dev \
         happy \
-        hlint hscolour pandoc \
+        hscolour pandoc \
         graphviz qemu-utils \
         python-docutils \
         python-simplejson \
@@ -290,7 +290,7 @@ case $DIST_RELEASE in
         vector-0.10.9.1 \
         zlib-0.5.4.1 \
         \
-        hlint-1.8.57 \
+        'hlint>=1.9.12' \
         HUnit-1.2.5.2 \
         QuickCheck-2.6 \
         test-framework-0.8.0.3 \
@@ -323,7 +323,7 @@ case $DIST_RELEASE in
       libghc-hslogger-dev libghc-crypto-dev \
       libghc-regex-pcre-dev libghc-attoparsec-dev \
       libghc-vector-dev libghc-temporary-dev \
-      libghc-snap-server-dev libpcre3 libpcre3-dev hscolour hlint pandoc \
+      libghc-snap-server-dev libpcre3 libpcre3-dev happy hscolour pandoc \
       libghc-zlib-dev libghc-psqueue-dev \
       cabal-install \
       python-setuptools python-sphinx python-epydoc graphviz python-pyparsing \
@@ -359,7 +359,8 @@ case $DIST_RELEASE in
        cabal install --global \
         'base64-bytestring>=1' \
         lens-3.10.2 \
-        'lifted-base>=0.1.2'
+        'lifted-base>=0.1.2' \
+        'hlint>=1.9.12'
 ;;
 
   testing)
@@ -373,14 +374,22 @@ case $DIST_RELEASE in
       libghc-hslogger-dev libghc-crypto-dev \
       libghc-regex-pcre-dev libghc-attoparsec-dev \
       libghc-vector-dev libghc-temporary-dev \
-      libghc-snap-server-dev libpcre3 libpcre3-dev hscolour hlint pandoc \
+      libghc-snap-server-dev libpcre3 libpcre3-dev happy hscolour pandoc \
       libghc-zlib-dev libghc-psqueue-dev \
       libghc-base64-bytestring-dev libghc-lens-dev libghc-lifted-base-dev \
+      libghc-cabal-dev \
       cabal-install \
       python-setuptools python-sphinx python-epydoc graphviz python-pyparsing \
       python-simplejson python-pycurl python-pyinotify python-paramiko \
       python-bitarray python-ipaddr python-yaml qemu-utils python-coverage pep8 \
       shelltestrunner python-dev pylint openssh-client vim git git-email
+
+    in_chroot -- \
+      cabal update
+
+    in_chroot -- \
+      cabal install --global \
+       'hlint>=1.9.12'
 ;;
 
   precise)
@@ -396,35 +405,43 @@ EOF
     echo "Installing packages"
     in_chroot -- \
       $APT_INSTALL \
-      autoconf automake ghc ghc-haddock libghc-network-dev \
-      libghc-test-framework{,-hunit,-quickcheck2}-dev \
-      libghc-json-dev libghc-curl-dev libghc-hinotify-dev \
+      autoconf automake ghc ghc-haddock \
+      libghc-curl-dev libghc-hinotify-dev \
       libghc-parallel-dev libghc-utf8-string-dev \
-      libghc-hslogger-dev libghc-crypto-dev \
-      libghc-regex-pcre-dev libghc-attoparsec-dev \
+      libghc-crypto-dev \
+      libghc-attoparsec-dev \
       libghc-vector-dev libghc-temporary-dev libghc-psqueue-dev \
-      libghc-snap-server-dev libpcre3 libpcre3-dev hscolour hlint pandoc \
+      libghc-cabal-dev \
+      cabal-install \
+      libpcre3 libpcre3-dev happy hscolour pandoc \
       python-setuptools python-sphinx python-epydoc graphviz python-pyparsing \
       python-simplejson python-pyinotify python-pycurl python-paramiko \
       python-bitarray python-ipaddr python-yaml qemu-utils python-coverage pep8 \
       python-dev pylint openssh-client vim git git-email \
       build-essential
 
-    echo "Installing cabal packages"
-    in_chroot -- \
-      $APT_INSTALL cabal-install
-
     in_chroot -- \
       cabal update
 
+     # Precise has network-2.4.0.0, which breaks, see
+     #   https://github.com/haskell/network/issues/60
      in_chroot -- \
        cabal install --global \
         'base64-bytestring>=1' \
+        hslogger-1.2.3 \
+        'hlint>=1.9.12' \
+        json-0.7 \
         lens-3.10.2 \
-        'lifted-base>=0.1.2'
-
-    in_chroot -- \
-      cabal install --global shelltestrunner
+        'lifted-base>=0.1.2' \
+        'network>=2.4.0.1' \
+        'regex-pcre>=0.94.4' \
+        parsec-3.1.3 \
+        shelltestrunner \
+        'snap-server>=0.8.1' \
+        test-framework-0.8.0.3 \
+        test-framework-hunit-0.3.0.1 \
+        test-framework-quickcheck2-0.3.0.2 \
+        'transformers>=0.3.0.0'
     ;;
 
   *)
@@ -437,8 +454,10 @@ EOF
       libghc-hslogger-dev libghc-crypto-dev \
       libghc-regex-pcre-dev libghc-attoparsec-dev \
       libghc-vector-dev libghc-temporary-dev libghc-psqueue-dev \
-      libghc-snap-server-dev libpcre3 libpcre3-dev hscolour hlint pandoc \
-      libghc-lifted-base-dev \
+      libghc-snap-server-dev libpcre3 libpcre3-dev happy hscolour pandoc \
+      libghc-lens-dev libghc-lifted-base-dev \
+      libghc-cabal-dev \
+      cabal-install \
       libghc-base64-bytestring-dev \
       python-setuptools python-sphinx python-epydoc graphviz python-pyparsing \
       python-simplejson python-pyinotify python-pycurl python-paramiko \
@@ -446,6 +465,12 @@ EOF
       shelltestrunner python-dev pylint openssh-client vim git git-email \
       build-essential
 
+    in_chroot -- \
+      cabal update
+
+     in_chroot -- \
+       cabal install --global \
+        'hlint>=1.9.12'
 ;;
 esac
 
diff --git a/doc/design-2.14.rst b/doc/design-2.14.rst
new file mode 100644 (file)
index 0000000..074e65f
--- /dev/null
@@ -0,0 +1,9 @@
+==================
+Ganeti 2.14 design
+==================
+
+The following designs have been partially implemented in Ganeti 2.14.
+
+- :doc:`design-location`
+- :doc:`design-reservations`
+- :doc:`design-configlock`
diff --git a/doc/design-configlock.rst b/doc/design-configlock.rst
index e19f0b9..9e650c7 100644 (file)
@@ -73,25 +73,23 @@ modifications.
 In a second step, more specialised read functions will be added to ``WConfD``.
 This will reduce the traffic for reads.
 
+Cached Reads
+------------
+
+As jobs synchronize with each other by means of regular locks, the parts
+of the configuration relevant for a job can only change while the job waits
+for new locks. So, if a job has a copy of the configuration and has not
+asked for locks afterwards, all read-only access can be done from that copy.
+While this will not affect the ``ConfigLock``, it saves traffic.
 
 Set-and-release action
 ----------------------
 
 A typical pattern is to change the configuration and afterwards release
-the ``ConfigLock``. To avoid unncecessary delay in this operation (the next
-modification of the configuration can already happen while the last change
-is written out), WConfD will offer a combined command that will
-
-- set the configuration to the specified value,
-
-- release the config lock,
-
-- and only then wait for the configuration write to finish; it will not
-  wait for confirmation of the lock-release write.
-
-If jobs use this combined command instead of the sequential set followed
-by release, new configuration changes can come in during writeout of the
-current change; in particular, a writeout can contain more than one change.
+the ``ConfigLock``. To avoid unnecessary RPC call overhead, WConfD will offer
+a combined call. To make that call retryable, it will do nothing if the
+``ConfigLock`` is not held by the caller; in the return value, it will indicate
+whether the config lock was held when the call was made.
 
 Short-lived ``ConfigLock``
 --------------------------
@@ -116,3 +114,43 @@ status can still happen, triggered by other requests. Now, if
 ``WConfD`` gets restarted after the lock acquisition, if that happend
 in the name of the job, it would own a lock without knowing about it,
 and hence that lock would never get released.
+
+
+Approaches considered, but not working
+======================================
+
+Set-and-release action with asynchronous writes
+-----------------------------------------------
+
+Approach
+~~~~~~~~
+
+A typical pattern is to change the configuration and afterwards release
+the ``ConfigLock``. To avoid unnecessary delay in this operation (the next
+modification of the configuration can already happen while the last change
+is written out), WConfD will offer a combined command that will
+
+- set the configuration to the specified value,
+
+- release the config lock,
+
+- and only then wait for the configuration write to finish; it will not
+  wait for confirmation of the lock-release write.
+
+If jobs use this combined command instead of the sequential set followed
+by release, new configuration changes can come in during writeout of the
+current change; in particular, a writeout can contain more than one change.
+
+Problem
+~~~~~~~
+
+This approach works fine, as long as always either ``WConfD`` can do an ordered
+shutdown or the calling process dies as well. If however, we allow random kill
+signals to be sent to individual daemons (e.g., by an out-of-memory killer),
+the following race occurs. A process can ask for a combined write-and-unlock
+operation; while the configuration is still written out, the write out of the
+updated lock status already finishes. Now, if ``WConfD`` forcefully gets killed
+in that very moment, a restarted ``WConfD`` will read the old configuration but
+the new lock status. This will make the calling process believe that its call,
+while it didn't get an answer, succeeded nevertheless, thus resulting in a
+wrong configuration state.
diff --git a/doc/design-disks.rst b/doc/design-disks.rst
index 18fb75c..74ad409 100644 (file)
@@ -149,10 +149,18 @@ The first two operations will be performed using the config functions
 will be performed using the functions ``AttachInstanceDisk`` and
 ``DetachInstanceDisk``.
 
-Since Ganeti doesn't allow for a `Disk` object to not be attached anywhere (for
-now) we will create two wrapper functions (namely ``AddInstanceDisk`` and
-``RemoveInstanceDisk``) which will add and attach a disk at the same time
-(respectively detach and remove a disk).
+More specifically, the `add` operation will add and attach a disk at the same
+time, using a wrapper that calls the ``AddDisk`` and ``AttachInstanceDisk``
+functions. In the same vein, the `remove` operation will detach and remove a
+disk using a wrapper that calls the ``DetachInstanceDisk`` and
+``RemoveInstanceDisk`` functions. The `attach` and `detach` operations are
+simpler, in the sense that they only call the ``AttachInstanceDisk`` and
+``DetachInstanceDisk`` functions, respectively.
+
+It is important to note that the `detach` operation introduces the notion of
+disks that are not attached to any instance. For this reason, the
+configuration checks that rejected detached disks will be removed, as the
+code can now handle them.
 
 In addition since Ganeti doesn't allow for a `Disk` object to be attached to
 more than one `Instance` at once, when attaching a disk to an instance we have
@@ -169,6 +177,32 @@ with the instance's disk objects. So in the backend we will only have to
 replace the ``disks`` slot with ``disks_info``.
 
 
+Supporting the old interface
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The current interface is designed with a uniform disk type in mind and
+this interface should still be supported to not break tools and
+workflows downstream.
+
+For instances with permanently attached disks of a uniform type, the
+behaviour remains fully compatible.
+
+Whenever an operation works on an instance, it will only consider the
+currently attached disks. If the operation is specific to a disk type, it
+will throw an error if any disks of an unsupported type are attached, as
+sketched below.
+
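+A sketch of such a check (illustrative helper, not actual Ganeti code):
+
+.. code-block:: python
+
+    from ganeti import errors
+
+    def CheckDiskTypes(attached_disks, supported_types):
+        # Reject the operation if any attached disk has a template the
+        # operation does not support.
+        bad = set(d.dev_type for d in attached_disks) - set(supported_types)
+        if bad:
+            raise errors.OpPrereqError("Unsupported disk types: %s"
+                                       % ", ".join(sorted(bad)),
+                                       errors.ECODE_INVAL)
+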
+When setting the disk template of an instance, we convert all currently
+attached disks to that template. This means that all disk types
+currently attached must be convertible to the new template.
+
+Since the disk template as a configuration value is going away, it needs
+to be replaced for queries. If the instance has no disks, the
+disk_template will be 'diskless', if it has disks of a single type, its
+disk_template will be that type, and if it has disks of multiple types,
+the new disk template 'mixed' will be returned.
+
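+A sketch of the query-side computation (the helper name is hypothetical):
+
+.. code-block:: python
+
+    def InstanceDiskTemplate(disk_types):
+        """Map the set of attached disk templates to a reported value."""
+        if not disk_types:
+            return "diskless"
+        if len(disk_types) == 1:
+            return next(iter(disk_types))
+        return "mixed"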
+
 Eliminating the disk template from the instance
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
@@ -196,10 +230,39 @@ fall into the following general categories:
    required. This is incompatible as well and will need to be listed in
    the NEWS file.
 
+Attach/Detach disks from cli
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-.. TODO: Locks for Disk objects
+The `attach`/`detach` options should be available through the command
+``gnt-instance modify``. Like the `add`/`remove` options, the `attach`/`detach`
+options can be invoked using the legacy syntax or the new syntax that supports
+indexes. For the attach option, we can refer to the disk using either its
+`name` or `uuid`. The detach option, on the other hand, has the same syntax
+as the remove option, and we can refer to a disk by its `name`, `uuid` or
+`index` in the instance.
+
+The attach/detach syntax can be seen below:
 
-.. TODO: Attach/Detach disks
+* **Legacy syntax**
+
+  .. code-block:: bash
+
+    gnt-instance modify --disk attach,name=*NAME* *INSTANCE*
+    gnt-instance modify --disk attach,uuid=*UUID* *INSTANCE*
+    gnt-instance modify --disk detach *INSTANCE*
+
+* **New syntax**
+
+  .. code-block:: bash
+
+    gnt-instance modify --disk *N*:attach,name=*NAME* *INSTANCE*
+    gnt-instance modify --disk *N*:attach,uuid=*UUID* *INSTANCE*
+    gnt-instance modify --disk *N*:detach *INSTANCE*
+    gnt-instance modify --disk *NAME*:detach *INSTANCE*
+    gnt-instance modify --disk *UUID*:detach *INSTANCE*
+
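+For example, to detach the second disk of an instance and re-attach it to
+another instance by name (instance and disk names are illustrative):
+
+.. code-block:: bash
+
+    gnt-instance modify --disk 1:detach instance1.example.com
+    gnt-instance modify --disk attach,name=disk0 instance2.example.com
+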
+
+.. TODO: Locks for Disk objects
 
 .. TODO: LUs for disks
 
index bc6735c..7aab851 100644 (file)
@@ -2,7 +2,7 @@
 Design document drafts
 ======================
 
-.. Last updated for Ganeti 2.13
+.. Last updated for Ganeti 2.14
 
 .. toctree::
    :maxdepth: 2
diff --git a/doc/design-file-based-disks-ownership.rst b/doc/design-file-based-disks-ownership.rst
new file mode 100644 (file)
index 0000000..fc89168
--- /dev/null
@@ -0,0 +1,67 @@
+=================================
+Ganeti file-based disks ownership
+=================================
+
+.. contents:: :depth: 2
+
+This design document explains the issue that emerges from applying the
+`detach` operation to file-based disks and provides a simple solution to it.
+Note that this design document applies only to disks of template `file` and
+`sharedfile`, but not `gluster`. However, for brevity these templates
+will go under the umbrella term `file-based`.
+
+Current state and shortcomings
+==============================
+
+When creating a file-based disk, Ganeti stores it inside a specific directory,
+called `file_storage_dir`. Inside this directory, there is a folder for each
+file-based instance and inside each folder are the files for the instance's
+disks (e.g. ``<file_storage_dir>/<instance_name>/<disk_name>``). This way of
+storing disks seems simple enough, but the `detach` operation does not work
+well with it. The reason is that if a disk is detached from an instance and
+attached to another one, the file will remain in the folder of the original
+instance.
+
+This means that if we try to destroy an instance whose folder still contains
+files of detached disks, Ganeti will correctly complain that the instance
+folder still has disk data. In higher-level terms, we need a way to resolve
+the issue of disk ownership at the filesystem level for file-based instances.
+
+Proposed changes
+================
+
+The change we propose is simple. Once a disk is detached from an instance, it
+will be moved out of the instance's folder. The new location will be the
+`file_storage_dir`, i.e. the disk will reside on the same level as the instance
+folders. In order to maintain a consistent configuration, the logical_id of the
+disk will be updated to point to the new path.
+
+Similarly, on the `attach` operation, the file name and logical id will change
+and the disk will be moved under the new instance's directory.
+
+Implementation details
+======================
+
+Detach operation
+~~~~~~~~~~~~~~~~
+
+Before detaching a disk from an instance, we do the following:
+
+1. Transform the current path to the new one, as sketched after this list:
+
+   <file_storage_dir>/<instance_name>/<disk_name> --> <file_storage_dir>/<disk_name>
+
+2. Use the rpc call ``call_blockdev_rename`` to move the disk to the new path.
+3. Store the new ``logical_id`` to the configuration.
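+
+A minimal sketch of the path transformation in step 1 (pure path logic,
+not the actual Ganeti helper):
+
+.. code-block:: python
+
+    import os
+
+    def DetachedFilePath(old_path):
+        # <file_storage_dir>/<instance_name>/<disk_name>
+        #   --> <file_storage_dir>/<disk_name>
+        instance_dir = os.path.dirname(old_path)
+        file_storage_dir = os.path.dirname(instance_dir)
+        return os.path.join(file_storage_dir, os.path.basename(old_path))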
+
+Attach operation
+~~~~~~~~~~~~~~~~
+
+Before attaching a disk to an instance, we do the following:
+
+1. Create the new path for the file disk. In order to construct it properly,
+   use the ``GenerateDiskTemplate`` function to create a dummy disk template
+   and get its ``logical_id``. The new ``logical_id`` contains the new path for
+   the file disk.
+2. Use the rpc call ``call_blockdev_rename`` to move the disk to the new path.
+3. Store the new ``logical_id`` to the configuration.
index 5d4989a..9d0f7aa 100644 (file)
@@ -54,13 +54,16 @@ The following components will be added cluster metric, weighed appropriately.
 
 The weights for these components might have to be tuned as experience with these
 setups grows, but as a starting point, both components will have a weight of
-0.5 each. In this way, any common-failure violations are less important than
-any hard constraints missed (instances on offline nodes, N+1 redundancy,
-exclusion tags) so that the hard constraints will be restored first when
-balancing a cluster. Nevertheless, with weight 0.5 the new common-failure
-components will still be significantly more important than all the balancedness
-components (cpu, disk, memory), as the latter are standard deviations of
-fractions.
+1.0 each. In this way, any common-failure violations are less important than
+any hard constraints missed (like instances on offline nodes) so that
+the hard constraints will be restored first when balancing a cluster.
+Nevertheless, with weight 1.0 the new common-failure components will
+still be significantly more important than all the balancedness components
+(cpu, disk, memory), as the latter are standard deviations of fractions and
+hence bounded by 0.5. They will also dominate the disk load component
+which, when only taking
+static information into account, essentially amounts to counting disks. In
+this way, Ganeti will be willing to sacrifice equal numbers of disks on every
+node in order to fulfill location requirements.
 
 Apart from changing the balancedness metric, common-failure tags will
 not have any other effect. In particular, as opposed to exclusion tags,
index 793c522..0390264 100644 (file)
@@ -185,7 +185,9 @@ An “ExtStorage provider” will have to provide the following methods:
 - Detach a disk from a given node
 - SetInfo to a disk (add metadata)
 - Verify its supported parameters
-- Snapshot a disk (currently used during gnt-backup export)
+- Snapshot a disk (optional)
+- Open a disk (optional)
+- Close a disk (optional)
 
 The proposed ExtStorage interface borrows heavily from the OS
 interface and follows a one-script-per-function approach. An ExtStorage
@@ -199,16 +201,19 @@ provider is expected to provide the following scripts:
 - ``setinfo``
 - ``verify``
 - ``snapshot`` (optional)
+- ``open`` (optional)
+- ``close`` (optional)
 
 All scripts will be called with no arguments and get their input via
 environment variables. A common set of variables will be exported for
-all commands, and some of them might have extra ones.
+all commands, and some commands might have extra variables.
 
 ``VOL_NAME``
   The name of the volume. This is unique within Ganeti, which uses it
   to refer to a specific volume inside the external storage.
 ``VOL_SIZE``
   The volume's size in mebibytes.
+  Available only to the `create` and `grow` scripts.
 ``VOL_NEW_SIZE``
   Available only to the `grow` script. It declares the
   new size of the volume after grow (in mebibytes).
@@ -221,11 +226,14 @@ all commands, and some of them might have extra ones.
 ``VOL_CNAME``
   The human readable name of the disk (if any).
 ``VOL_SNAPSHOT_NAME``
-  The name of the volume's snapshot to be taken.
+  The name of the volume's snapshot.
   Available only to the `snapshot` script.
 ``VOL_SNAPSHOT_SIZE``
-  The size of the volume's snapshot to be taken.
+  The size of the volume's snapshot.
   Available only to the `snapshot` script.
+``VOL_OPEN_EXCLUSIVE``
+  Whether the volume will be accessed exclusively or not.
+  Available only to the `open` script.
 
 All scripts except `attach` should return 0 on success and non-zero on
 error, accompanied by an appropriate error message on stderr. The
@@ -233,9 +241,14 @@ error, accompanied by an appropriate error message on stderr. The
 the block device's full path, after it has been successfully attached to
 the host node. On error it should return non-zero.
 
-To keep backwards compatibility we let the ``snapshot`` script be
-optional. If present then the provider will support instance backup
-export as well.
+The ``snapshot``, ``open`` and ``close`` scripts are introduced after
+the first implementation of the ExtStorage Interface. To keep backwards
+compatibility with the first implementation, we make these scripts
+optional.
+
+The ``snapshot`` script, if present, will be used for instance backup
+export. The ``open`` script makes the device ready for I/O. The ``close``
+script disables I/O on the device.
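+
+As an illustration, a provider's ``open`` script could look as follows
+(the activation command is provider-specific and therefore omitted; the
+exact value format of ``VOL_OPEN_EXCLUSIVE`` is an assumption here):
+
+.. code-block:: bash
+
+    #!/bin/bash
+    # Make the volume ready for I/O on this node.
+    set -e
+    if [ -n "$VOL_OPEN_EXCLUSIVE" ]; then
+        echo "opening volume $VOL_NAME (exclusive=$VOL_OPEN_EXCLUSIVE)" >&2
+    fi
+    # provider-specific activation of $VOL_NAME would go here
+    exit 0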
 
 Implementation
 --------------
@@ -243,7 +256,8 @@ Implementation
 To support the ExtStorage interface, we will introduce a new disk
 template called `ext`. This template will implement the existing Ganeti
 disk interface in `lib/bdev.py` (create, remove, attach, assemble,
-shutdown, grow, setinfo), and will simultaneously pass control to the
+shutdown, grow, setinfo, open, close),
+and will simultaneously pass control to the
 external scripts to actually handle the above actions. The `ext` disk
 template will act as a translation layer between the current Ganeti disk
 interface and the ExtStorage providers.
@@ -315,7 +329,9 @@ with the hypervisor it corresponds to (e.g. kvm:<uri>). The prefix will
 be case insensitive. If the 'attach' script doesn't return any extra
 lines, we assume that the ExtStorage provider doesn't support userspace
 access (this way we maintain backward compatibility with the existing
-'attach' scripts).
+'attach' scripts). In case the provider supports *only* userspace
+access and thus a local block device is not available, then the first
+line should be an empty line.
 
 The 'GetUserspaceAccessUri' method of the 'ExtStorageDevice' class will
 parse the output of the 'attach' script and if the provider supports
index 45d375a..f7a0a25 100644 (file)
@@ -1,7 +1,7 @@
 Ganeti customisation using hooks
 ================================
 
-Documents Ganeti version 2.13
+Documents Ganeti version 2.14
 
 .. contents::
 
index d5896ea..c5c360f 100644 (file)
@@ -1,7 +1,7 @@
 Ganeti automatic instance allocation
 ====================================
 
-Documents Ganeti version 2.13
+Documents Ganeti version 2.14
 
 .. contents::
 
index a95025d..28b37a0 100644 (file)
@@ -79,6 +79,7 @@ and draft versions (which are either incomplete or not implemented).
    design-2.11.rst
    design-2.12.rst
    design-2.13.rst
+   design-2.14.rst
 
 Draft designs
 -------------
@@ -104,6 +105,7 @@ Draft designs
    design-disk-conversion.rst
    design-disks.rst
    design-file-based-storage.rst
+   design-file-based-disks-ownership.rst
    design-hroller.rst
    design-hsqueeze.rst
    design-hotplug.rst
index 31178dc..da66565 100644 (file)
@@ -1,7 +1,7 @@
 Security in Ganeti
 ==================
 
-Documents Ganeti version 2.13
+Documents Ganeti version 2.14
 
 Ganeti was developed to run on internal, trusted systems. As such, the
 security model is all-or-nothing.
index 9fd37d9..7fdfc45 100644 (file)
@@ -1,7 +1,7 @@
 Virtual cluster support
 =======================
 
-Documents Ganeti version 2.13
+Documents Ganeti version 2.14
 
 .. contents::
 
index 646fc70..f23b5ec 100644 (file)
@@ -2546,6 +2546,10 @@ def _SymlinkBlockDev(instance_name, device_path, idx):
   @return: absolute path to the disk's symlink
 
   """
+  # In case we have only a userspace access URI, device_path is None
+  if not device_path:
+    return None
+
   link_name = _GetBlockDevSymlinkPath(instance_name, idx)
   try:
     os.symlink(device_path, link_name)
@@ -2839,21 +2843,10 @@ def AcceptInstance(instance, info, target):
   @param target: target host (usually ip), on this node
 
   """
-  # TODO: why is this required only for DTS_EXT_MIRROR?
-  if instance.disk_template in constants.DTS_EXT_MIRROR:
-    # Create the symlinks, as the disks are not active
-    # in any way
-    try:
-      _GatherAndLinkBlockDevs(instance)
-    except errors.BlockDeviceError, err:
-      _Fail("Block device error: %s", err, exc=True)
-
   hyper = hypervisor.GetHypervisor(instance.hypervisor)
   try:
     hyper.AcceptInstance(instance, info, target)
   except errors.HypervisorError, err:
-    if instance.disk_template in constants.DTS_EXT_MIRROR:
-      _RemoveBlockDevLinks(instance.name, instance.disks_info)
     _Fail("Failed to accept instance: %s", err, exc=True)
 
 
@@ -4063,9 +4056,13 @@ def OSEnvironment(instance, inst_os, debug=0):
   # Disks
   for idx, disk in enumerate(instance.disks_info):
     real_disk = _OpenRealBD(disk)
-    result["DISK_%d_PATH" % idx] = real_disk.dev_path
+    uri = _CalculateDeviceURI(instance, disk, real_disk)
     result["DISK_%d_ACCESS" % idx] = disk.mode
     result["DISK_%d_UUID" % idx] = disk.uuid
+    if real_disk.dev_path:
+      result["DISK_%d_PATH" % idx] = real_disk.dev_path
+    if uri:
+      result["DISK_%d_URI" % idx] = uri
     if disk.name:
       result["DISK_%d_NAME" % idx] = disk.name
     if constants.HV_DISK_TYPE in instance.hvparams:
@@ -4206,15 +4203,14 @@ def BlockdevSnapshot(disk, snap_name, snap_size):
     else:
       _Fail("Cannot find block device %s", disk)
 
-  if disk.dev_type == constants.DT_DRBD8:
-    if not disk.children:
-      _Fail("DRBD device '%s' without backing storage cannot be snapshotted",
-            disk.unique_id)
-    return BlockdevSnapshot(disk.children[0], snap_name, snap_size)
-  elif disk.dev_type == constants.DT_PLAIN:
-    return _DiskSnapshot(disk, snap_name, snap_size)
-  elif disk.dev_type == constants.DT_EXT:
-    return _DiskSnapshot(disk, snap_name, snap_size)
+  if disk.SupportsSnapshots():
+    if disk.dev_type == constants.DT_DRBD8:
+      if not disk.children:
+        _Fail("DRBD device '%s' without backing storage cannot be snapshotted",
+              disk.unique_id)
+      return BlockdevSnapshot(disk.children[0], snap_name, snap_size)
+    else:
+      return _DiskSnapshot(disk, snap_name, snap_size)
   else:
     _Fail("Cannot snapshot block device '%s' of type '%s'",
           disk.logical_id, disk.dev_type)
@@ -4261,6 +4257,7 @@ def FinalizeExport(instance, snap_disks):
   """
   destdir = utils.PathJoin(pathutils.EXPORT_DIR, instance.name + ".new")
   finaldestdir = utils.PathJoin(pathutils.EXPORT_DIR, instance.name)
+  disk_template = utils.GetDiskTemplate(snap_disks)
 
   config = objects.SerializableConfigParser()
 
@@ -4282,7 +4279,7 @@ def FinalizeExport(instance, snap_disks):
              instance.beparams[constants.BE_MAXMEM])
   config.set(constants.INISECT_INS, "vcpus", "%d" %
              instance.beparams[constants.BE_VCPUS])
-  config.set(constants.INISECT_INS, "disk_template", instance.disk_template)
+  config.set(constants.INISECT_INS, "disk_template", disk_template)
   config.set(constants.INISECT_INS, "hypervisor", instance.hypervisor)
   config.set(constants.INISECT_INS, "tags", " ".join(instance.GetTags()))
 
@@ -4309,7 +4306,7 @@ def FinalizeExport(instance, snap_disks):
       config.set(constants.INISECT_INS, "disk%d_ivname" % disk_count,
                  ("%s" % disk.iv_name))
       config.set(constants.INISECT_INS, "disk%d_dump" % disk_count,
-                 ("%s" % disk.logical_id[1]))
+                 ("%s" % disk.uuid))
       config.set(constants.INISECT_INS, "disk%d_size" % disk_count,
                  ("%d" % disk.size))
       config.set(constants.INISECT_INS, "disk%d_name" % disk_count,
@@ -4624,12 +4621,35 @@ def BlockdevClose(instance_name, disks):
     except errors.BlockDeviceError, err:
       msg.append(str(err))
   if msg:
-    _Fail("Can't make devices secondary: %s", ",".join(msg))
+    _Fail("Can't close devices: %s", ",".join(msg))
   else:
     if instance_name:
       _RemoveBlockDevLinks(instance_name, disks)
 
 
+def BlockdevOpen(instance_name, disks, exclusive):
+  """Opens the given block devices.
+
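+  @type instance_name: string
+  @param instance_name: the name of the instance the disks belong to
+  @type disks: list of L{objects.Disk}
+  @param disks: the disks to open
+  @type exclusive: boolean
+  @param exclusive: whether to open the devices in exclusive mode
+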
+  """
+  bdevs = []
+  for cf in disks:
+    rd = _RecursiveFindBD(cf)
+    if rd is None:
+      _Fail("Can't find device %s", cf)
+    bdevs.append(rd)
+
+  msg = []
+  for idx, rd in enumerate(bdevs):
+    try:
+      rd.Open(exclusive=exclusive)
+      _SymlinkBlockDev(instance_name, rd.dev_path, idx)
+    except errors.BlockDeviceError, err:
+      msg.append(str(err))
+
+  if msg:
+    _Fail("Can't open devices: %s", ",".join(msg))
+
+
 def ValidateHVParams(hvname, hvparams):
   """Validates the given hypervisor parameters.
 
@@ -4934,22 +4954,13 @@ def _GetImportExportIoCommand(instance, mode, ieio, ieargs):
 
   elif ieio == constants.IEIO_RAW_DISK:
     (disk, ) = ieargs
-
     real_disk = _OpenRealBD(disk)
 
     if mode == constants.IEM_IMPORT:
-      # we use nocreat to fail if the device is not already there or we pass a
-      # wrong path; we use notrunc to no attempt truncate on an LV device
-      suffix = utils.BuildShellCmd("| dd of=%s conv=nocreat,notrunc bs=%s",
-                                   real_disk.dev_path,
-                                   str(constants.DD_BLOCK_SIZE)) # 1 MB
+      suffix = "| %s" % utils.ShellQuoteArgs(real_disk.Import())
 
     elif mode == constants.IEM_EXPORT:
-      # the block size on the read dd is 1MiB to match our units
-      prefix = utils.BuildShellCmd("dd if=%s bs=%s count=%s |",
-                                   real_disk.dev_path,
-                                   str(constants.DD_BLOCK_SIZE), # 1 MB
-                                   str(disk.size))
+      prefix = "%s |" % utils.ShellQuoteArgs(real_disk.Export())
       exp_size = disk.size
 
   elif ieio == constants.IEIO_SCRIPT:
@@ -5018,6 +5029,11 @@ def StartImportExportDaemon(mode, opts, host, port, instance, component,
   @param ieioargs: Input/output arguments
 
   """
+
+  # Use Import/Export over socat.
+  #
+  #   Export() gives a command that produces a flat stream.
+  #   Import() gives a command that reads a flat stream onto a disk.
   if mode == constants.IEM_IMPORT:
     prefix = "import"
 
@@ -5229,18 +5245,12 @@ def DrbdDisconnectNet(disks):
             err, exc=True)
 
 
-def DrbdAttachNet(disks, instance_name, multimaster):
+def DrbdAttachNet(disks, multimaster):
   """Attaches the network on a list of drbd devices.
 
   """
   bdevs = _FindDisks(disks)
 
-  if multimaster:
-    for idx, rd in enumerate(bdevs):
-      try:
-        _SymlinkBlockDev(instance_name, rd.dev_path, idx)
-      except EnvironmentError, err:
-        _Fail("Can't create symlink: %s", err)
   # reconnect disks, switch to new master configuration and if
   # needed primary mode
   for rd in bdevs:
@@ -5294,14 +5304,6 @@ def DrbdAttachNet(disks, instance_name, multimaster):
   except utils.RetryTimeout:
     _Fail("Timeout in disk reconnecting")
 
-  if multimaster:
-    # change to primary mode
-    for rd in bdevs:
-      try:
-        rd.Open()
-      except errors.BlockDeviceError, err:
-        _Fail("Can't change to primary mode: %s", err)
-
 
 def DrbdWaitSync(disks):
   """Wait until DRBDs have synchronized.
index 4b6564c..d649b8e 100644 (file)
@@ -523,7 +523,7 @@ def InitCluster(cluster_name, mac_prefix, # pylint: disable=R0913, R0914
       for entry in os.listdir(ddir):
         if not os.path.isdir(os.path.join(ddir, entry)):
           raise errors.OpPrereqError(
-            "%s contains non-directory enries like %s. Remove left-overs of an"
+            "%s contains non-directory entries like %s. Remove left-overs of an"
             " old cluster before initialising a new one" % (ddir, entry),
             errors.ECODE_STATE)
 
index 3fcfd98..2b5a046 100644 (file)
@@ -714,6 +714,10 @@ def GenericPollJob(job_id, cbs, report_cbs):
   @type report_cbs: Instance of L{JobPollReportCbBase}
   @param report_cbs: Reporting callbacks
 
+  @return: the opresult of the job
+  @raise errors.JobLost: If job can't be found
+  @raise errors.OpExecError: If job didn't succeed
+
   """
   prev_job_info = None
   prev_logmsg_serial = None
@@ -1287,6 +1291,13 @@ def GenericInstanceCreate(mode, opts, args):
 
   """
   instance = args[0]
+  forthcoming = opts.ensure_value("forthcoming", False)
+  commit = opts.ensure_value("commit", False)
+
+  if forthcoming and commit:
+    raise errors.OpPrereqError("Creating an instance only forthcoming and"
+                               " commiting it are mutally exclusive",
+                               errors.ECODE_INVAL)
 
   (pnode, snode) = SplitNodeOption(opts.node)
 
@@ -1392,6 +1403,9 @@ def GenericInstanceCreate(mode, opts, args):
     else:
       instance_communication = opts.instance_communication
   elif mode == constants.INSTANCE_IMPORT:
+    if forthcoming:
+      raise errors.OpPrereqError("forthcoming instances can only be created,"
+                                 " not imported")
     start = False
     os_type = None
     force_variant = False
@@ -1405,6 +1419,8 @@ def GenericInstanceCreate(mode, opts, args):
     raise errors.ProgrammerError("Invalid creation mode %s" % mode)
 
   op = opcodes.OpInstanceCreate(
+    forthcoming=forthcoming,
+    commit=commit,
     instance_name=instance,
     disks=disks,
     disk_template=opts.disk_template,
index 79de008..ae58ede 100644 (file)
@@ -60,6 +60,7 @@ __all__ = [
   "CLEANUP_OPT",
   "cli_option",
   "CLUSTER_DOMAIN_SECRET_OPT",
+  "COMMIT_OPT",
   "COMMON_CREATE_OPTS",
   "COMMON_OPTS",
   "COMPRESS_OPT",
@@ -94,6 +95,7 @@ __all__ = [
   "FORCE_FILTER_OPT",
   "FORCE_OPT",
   "FORCE_VARIANT_OPT",
+  "FORTHCOMING_OPT",
   "GATEWAY6_OPT",
   "GATEWAY_OPT",
   "GLOBAL_FILEDIR_OPT",
@@ -118,6 +120,7 @@ __all__ = [
   "IGNORE_OFFLINE_OPT",
   "IGNORE_REMOVE_FAILURES_OPT",
   "IGNORE_SECONDARIES_OPT",
+  "IGNORE_SOFT_ERRORS_OPT",
   "IGNORE_SIZE_OPT",
   "INCLUDEDEFAULTS_OPT",
   "INSTALL_IMAGE_OPT",
@@ -131,6 +134,7 @@ __all__ = [
   "IPOLICY_STD_SPECS_OPT",
   "IPOLICY_STD_SPECS_STR",
   "IPOLICY_VCPU_RATIO",
+  "LONG_SLEEP_OPT",
   "MAC_PREFIX_OPT",
   "MAINTAIN_NODE_HEALTH_OPT",
   "MASTER_NETDEV_OPT",
@@ -583,6 +587,12 @@ IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
                                   help=("Ignore offline nodes and do as much"
                                         " as possible"))
 
+IGNORE_SOFT_ERRORS_OPT = cli_option("--ignore-soft-errors",
+                                    dest="ignore_soft_errors",
+                                    action="store_true", default=False,
+                                    help=("Tell htools to ignore any soft"
+                                          " errors like N+1 violations"))
+
 TAG_ADD_OPT = cli_option("--tags", dest="tags",
                          default=None, help="Comma-separated list of instance"
                                             " tags")
@@ -895,6 +905,16 @@ NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                          action="store_false",
                          help="Don't start the instance after creation")
 
+FORTHCOMING_OPT = cli_option("--forthcoming", dest="forthcoming",
+                             action="store_true", default=False,
+                             help="Only reserve resources, but do not"
+                                  " create the instance yet")
+
+COMMIT_OPT = cli_option("--commit", dest="commit",
+                        action="store_true", default=False,
+                        help="The instance is already reserved and should"
+                             " be committed now")
+
 SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                          action="store_true", default=False,
                          help="Show command instead of executing it")
@@ -1570,6 +1590,9 @@ VERIFY_CLUTTER_OPT = cli_option(
     help="Verify that Ganeti did not clutter"
     " up the 'authorized_keys' file", action="store_true")
 
+LONG_SLEEP_OPT = cli_option(
+    "--long-sleep", default=False, dest="long_sleep",
+    help="Allow long shutdowns when backing up instances", action="store_true")
 
 #: Options provided by all commands
 COMMON_OPTS = [DEBUG_OPT, REASON_OPT]
index 578edf9..89b0e93 100644 (file)
@@ -109,7 +109,8 @@ def ExportInstance(opts, args):
     ignore_remove_failures=ignore_remove_failures,
     zero_free_space=opts.zero_free_space,
     zeroing_timeout_fixed=opts.zeroing_timeout_fixed,
-    zeroing_timeout_per_mib=opts.zeroing_timeout_per_mib
+    zeroing_timeout_per_mib=opts.zeroing_timeout_per_mib,
+    long_sleep=opts.long_sleep
   )
 
   SubmitOrSend(op, opts)
@@ -169,7 +170,7 @@ commands = {
     [FORCE_OPT, SINGLE_NODE_OPT, TRANSPORT_COMPRESSION_OPT, NOSHUTDOWN_OPT,
      SHUTDOWN_TIMEOUT_OPT, REMOVE_INSTANCE_OPT, IGNORE_REMOVE_FAILURES_OPT,
      DRY_RUN_OPT, PRIORITY_OPT, ZERO_FREE_SPACE_OPT, ZEROING_TIMEOUT_FIXED_OPT,
-     ZEROING_TIMEOUT_PER_MIB_OPT] + SUBMIT_OPTS,
+     ZEROING_TIMEOUT_PER_MIB_OPT, LONG_SLEEP_OPT] + SUBMIT_OPTS,
     "-n <target_node> [opts...] <name>",
     "Exports an instance to an image"),
   "import": (
index fa29f9f..27877a7 100644 (file)
@@ -2181,42 +2181,6 @@ def _VersionSpecificDowngrade():
   """
   ToStdout("Performing version-specific downgrade tasks.")
 
-  # Determine if this cluster is set up with SSH handling
-  # (aka not using --no-ssh-init), check if the public
-  # keyfile exists.
-  update_keys = os.path.exists(pathutils.SSH_PUB_KEYS)
-
-  if not update_keys:
-    return True
-
-  ToStdout("Replace nodes' SSH keys with the master's keys.")
-  (_, root_keyfiles) = \
-    ssh.GetAllUserFiles(constants.SSH_LOGIN_USER, mkdir=False, dircheck=False)
-
-  dsa_root_keyfiles = dict((kind, value) for (kind, value)
-                           in root_keyfiles.items()
-                           if kind == constants.SSHK_DSA)
-  master_private_keyfile, master_public_keyfile = \
-      dsa_root_keyfiles[constants.SSHK_DSA]
-
-  nodes = ssconf.SimpleStore().GetOnlineNodeList()
-  master_node = ssconf.SimpleStore().GetMasterNode()
-  cluster_name = ssconf.SimpleStore().GetClusterName()
-
-  # If master node is in 'nodes', remove it
-  if master_node in nodes:
-    nodes.remove(master_node)
-
-  srun = ssh.SshRunner(cluster_name=cluster_name)
-  for name in nodes:
-    for key_file in [master_private_keyfile, master_public_keyfile]:
-      command = utils.text.ShellQuoteArgs([
-          "scp", key_file, "%s:%s" % (name, key_file)])
-      result = srun.Run(master_node, constants.SSH_LOGIN_USER, command)
-      if result.exit_code != 0:
-        ToStderr("Overiding SSH key '%s' of node '%s' failed. You might"
-                 " want to clean up manually." % (key_file, name))
-
   return True
 
 
index 76ab88e..7a49d60 100644 (file)
@@ -1258,37 +1258,52 @@ def _ConvertNicDiskModifications(mods):
       # Add item as last item (legacy interface)
       action = constants.DDM_ADD
       identifier = -1
+    elif identifier == constants.DDM_ATTACH:
+      # Attach item as last item (legacy interface)
+      action = constants.DDM_ATTACH
+      identifier = -1
     elif identifier == constants.DDM_REMOVE:
       # Remove last item (legacy interface)
       action = constants.DDM_REMOVE
       identifier = -1
+    elif identifier == constants.DDM_DETACH:
+      # Detach last item (legacy interface)
+      action = constants.DDM_DETACH
+      identifier = -1
     else:
-      # Modifications and adding/removing at arbitrary indices
+      # Modifications and adding/attaching/removing/detaching at arbitrary
+      # indices
       add = params.pop(constants.DDM_ADD, _MISSING)
+      attach = params.pop(constants.DDM_ATTACH, _MISSING)
       remove = params.pop(constants.DDM_REMOVE, _MISSING)
+      detach = params.pop(constants.DDM_DETACH, _MISSING)
       modify = params.pop(constants.DDM_MODIFY, _MISSING)
 
-      if modify is _MISSING:
-        if not (add is _MISSING or remove is _MISSING):
-          raise errors.OpPrereqError("Cannot add and remove at the same time",
-                                     errors.ECODE_INVAL)
-        elif add is not _MISSING:
-          action = constants.DDM_ADD
-        elif remove is not _MISSING:
-          action = constants.DDM_REMOVE
-        else:
-          action = constants.DDM_MODIFY
-
-      elif add is _MISSING and remove is _MISSING:
-        action = constants.DDM_MODIFY
-      else:
-        raise errors.OpPrereqError("Cannot modify and add/remove at the"
-                                   " same time", errors.ECODE_INVAL)
+      # Check if the user has requested more than one operation and raise an
+      # exception. If no operations have been given, default to modify.
+      action = constants.DDM_MODIFY
+      ops = {
+        constants.DDM_ADD: add,
+        constants.DDM_ATTACH: attach,
+        constants.DDM_REMOVE: remove,
+        constants.DDM_DETACH: detach,
+        constants.DDM_MODIFY: modify,
+      }
+      count = 0
+      for op, param in ops.items():
+        if param is not _MISSING:
+          count += 1
+          action = op
+      if count > 1:
+        raise errors.OpPrereqError(
+          "Cannot do more than one of the following operations at the"
+          " same time: %s" % ", ".join(ops.keys()),
+          errors.ECODE_INVAL)
 
       assert not (constants.DDMS_VALUES_WITH_MODIFY & set(params.keys()))
 
-    if action == constants.DDM_REMOVE and params:
-      raise errors.OpPrereqError("Not accepting parameters on removal",
+    if action in (constants.DDM_REMOVE, constants.DDM_DETACH) and params:
+      raise errors.OpPrereqError("Not accepting parameters on removal/detach",
                                  errors.ECODE_INVAL)
 
     result.append((action, identifier, params))
@@ -1542,6 +1557,8 @@ m_inst_tags_opt = cli_option("--tags", dest="multi_mode",
 
 # this is defined separately due to readability only
 add_opts = [
+  FORTHCOMING_OPT,
+  COMMIT_OPT,
   NOSTART_OPT,
   OS_OPT,
   FORCE_VARIANT_OPT,
index 4280eee..87f3d19 100644 (file)
@@ -444,7 +444,8 @@ def EvacuateNode(opts, args):
   op = opcodes.OpNodeEvacuate(node_name=args[0], mode=mode,
                               remote_node=opts.dst_node,
                               iallocator=opts.iallocator,
-                              early_release=opts.early_release)
+                              early_release=opts.early_release,
+                              ignore_soft_errors=opts.ignore_soft_errors)
   result = SubmitOrSend(op, opts, cl=cl)
 
   # Keep track of submitted jobs
@@ -1136,8 +1137,9 @@ commands = {
     "Add a node to the cluster"),
   "evacuate": (
     EvacuateNode, ARGS_ONE_NODE,
-    [FORCE_OPT, IALLOCATOR_OPT, NEW_SECONDARY_OPT, EARLY_RELEASE_OPT,
-     PRIORITY_OPT, PRIMARY_ONLY_OPT, SECONDARY_ONLY_OPT] + SUBMIT_OPTS,
+    [FORCE_OPT, IALLOCATOR_OPT, IGNORE_SOFT_ERRORS_OPT, NEW_SECONDARY_OPT,
+     EARLY_RELEASE_OPT, PRIORITY_OPT, PRIMARY_ONLY_OPT, SECONDARY_ONLY_OPT]
+    + SUBMIT_OPTS,
     "[-f] {-I <iallocator> | -n <dst>} [-p | -s] [options...] <node>",
     "Relocate the primary and/or secondary instances from a node"),
   "failover": (
index 8913305..ee02417 100644 (file)
@@ -51,11 +51,12 @@ from ganeti.cmdlib.cluster import \
   LUClusterRename, \
   LUClusterRepairDiskSizes, \
   LUClusterSetParams, \
+  LUClusterRenewCrypto
+from ganeti.cmdlib.cluster.verify import \
   LUClusterVerify, \
   LUClusterVerifyConfig, \
   LUClusterVerifyGroup, \
-  LUClusterVerifyDisks, \
-  LUClusterRenewCrypto
+  LUClusterVerifyDisks
 from ganeti.cmdlib.group import \
   LUGroupAdd, \
   LUGroupAssignNodes, \
@@ -76,13 +77,13 @@ from ganeti.cmdlib.node import \
   LUNodeRemove, \
   LURepairNodeStorage
 from ganeti.cmdlib.instance import \
-  LUInstanceCreate, \
   LUInstanceRename, \
   LUInstanceRemove, \
   LUInstanceMove, \
   LUInstanceMultiAlloc, \
-  LUInstanceSetParams, \
   LUInstanceChangeGroup
+from ganeti.cmdlib.instance_create import \
+  LUInstanceCreate
 from ganeti.cmdlib.instance_storage import \
   LUInstanceRecreateDisks, \
   LUInstanceGrowDisk, \
@@ -98,6 +99,8 @@ from ganeti.cmdlib.instance_operation import \
   LUInstanceReinstall, \
   LUInstanceReboot, \
   LUInstanceConsole
+from ganeti.cmdlib.instance_set_params import \
+  LUInstanceSetParams
 from ganeti.cmdlib.instance_query import \
   LUInstanceQueryData
 from ganeti.cmdlib.backup import \
index d305c94..3486291 100644 (file)
@@ -157,11 +157,6 @@ class LUBackupExport(LogicalUnit):
       #  - removing the removal operation altogether
       self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
 
-      # Allocations should be stopped while this LU runs with node locks, but
-      # it doesn't have to be exclusive
-      self.share_locks[locking.LEVEL_NODE_ALLOC] = 1
-      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
-
   def DeclareLocks(self, level):
     """Last minute lock declaration."""
     # All nodes are locked anyway, so nothing to do here.
@@ -283,13 +278,6 @@ class LUBackupExport(LogicalUnit):
       raise errors.ProgrammerError("Unhandled export mode %r" %
                                    self.op.mode)
 
-    # instance disk type verification
-    # TODO: Implement export support for file-based disks
-    for disk in self.cfg.GetInstanceDisks(self.instance.uuid):
-      if disk.dev_type in constants.DTS_FILEBASED:
-        raise errors.OpPrereqError("Export not supported for instances with"
-                                   " file-based disks", errors.ECODE_INVAL)
-
     # Check prerequisites for zeroing
     if self.op.zero_free_space:
       # Check that user shutdown detection has been enabled
@@ -297,18 +285,19 @@ class LUBackupExport(LogicalUnit):
       if self.instance.hypervisor == constants.HT_KVM and \
          not hvparams.get(constants.HV_KVM_USER_SHUTDOWN, False):
         raise errors.OpPrereqError("Instance shutdown detection must be "
-                                   "enabled for zeroing to work")
+                                   "enabled for zeroing to work",
+                                   errors.ECODE_INVAL)
 
       # Check that the instance is set to boot from the disk
       if constants.HV_BOOT_ORDER in hvparams and \
          hvparams[constants.HV_BOOT_ORDER] != constants.HT_BO_DISK:
         raise errors.OpPrereqError("Booting from disk must be set for zeroing "
-                                   "to work")
+                                   "to work", errors.ECODE_INVAL)
 
       # Check that the zeroing image is set
       if not self.cfg.GetZeroingImage():
         raise errors.OpPrereqError("A zeroing image must be set for zeroing to"
-                                   " work")
+                                   " work", errors.ECODE_INVAL)
 
       if self.op.zeroing_timeout_fixed is None:
         self.op.zeroing_timeout_fixed = constants.HELPER_VM_STARTUP
@@ -320,7 +309,13 @@ class LUBackupExport(LogicalUnit):
       if (self.op.zeroing_timeout_fixed is not None or
           self.op.zeroing_timeout_per_mib is not None):
         raise errors.OpPrereqError("Zeroing timeout options can only be used"
-                                   " only with the --zero-free-space option")
+                                   " only with the --zero-free-space option",
+                                   errors.ECODE_INVAL)
+
+    if self.op.long_sleep and not self.op.shutdown:
+      raise errors.OpPrereqError("The long sleep option only makes sense when"
+                                 " the instance can be shut down.",
+                                 errors.ECODE_INVAL)
 
     self.secondary_nodes = \
       self.cfg.GetInstanceSecondaryNodes(self.instance.uuid)
@@ -428,6 +423,34 @@ class LUBackupExport(LogicalUnit):
 
     feedback_fn("Zeroing completed!")
 
+  def StartInstance(self, feedback_fn, src_node_uuid):
+    """Send the node instructions to start the instance.
+
+    @raise errors.OpExecError: If the instance didn't start up.
+
+    """
+    assert self.instance.disks_active
+    feedback_fn("Starting instance %s" % self.instance.name)
+    result = self.rpc.call_instance_start(src_node_uuid,
+                                          (self.instance, None, None),
+                                          False, self.op.reason)
+    msg = result.fail_msg
+    if msg:
+      feedback_fn("Failed to start instance: %s" % msg)
+      ShutdownInstanceDisks(self, self.instance)
+      raise errors.OpExecError("Could not start instance: %s" % msg)
+
+  def TrySnapshot(self):
+    """Returns true if there is a reason to prefer a snapshot."""
+    return (not self.op.remove_instance and
+            self.instance.admin_state == constants.ADMINST_UP)
+
+  def DoReboot(self):
+    """Returns true iff the instance needs to be started after transfer."""
+    return (self.op.shutdown and
+            self.instance.admin_state == constants.ADMINST_UP and
+            not self.op.remove_instance)
+
   def Exec(self, feedback_fn):
     """Export an instance to an image in the cluster.
 
@@ -462,22 +485,25 @@ class LUBackupExport(LogicalUnit):
       helper = masterd.instance.ExportInstanceHelper(self, feedback_fn,
                                                      self.instance)
 
-      helper.CreateSnapshots()
-      try:
-        if (self.op.shutdown and
-            self.instance.admin_state == constants.ADMINST_UP and
-            not self.op.remove_instance):
-          assert self.instance.disks_active
-          feedback_fn("Starting instance %s" % self.instance.name)
-          result = self.rpc.call_instance_start(src_node_uuid,
-                                                (self.instance, None, None),
-                                                False, self.op.reason)
-          msg = result.fail_msg
-          if msg:
-            feedback_fn("Failed to start instance: %s" % msg)
-            ShutdownInstanceDisks(self, self.instance)
-            raise errors.OpExecError("Could not start instance: %s" % msg)
+      snapshots_available = False
+      if self.TrySnapshot():
+        snapshots_available = helper.CreateSnapshots()
+        if not snapshots_available:
+          if not self.op.shutdown:
+            raise errors.OpExecError(
+              "Not all disks could be snapshotted, and you requested a live "
+              "export; aborting"
+            )
+          if not self.op.long_sleep:
+            raise errors.OpExecError(
+              "Not all disks could be snapshotted, and you did not allow the "
+              "instance to remain offline for a longer time through the "
+              "--long-sleep option; aborting"
+            )
 
+      try:
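+        # If snapshots were taken, the instance can be started again right
+        # away, before the (possibly lengthy) transfer; otherwise it has to
+        # stay down until the export data has been transferred (see below).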
+        if self.DoReboot() and snapshots_available:
+          self.StartInstance(feedback_fn, src_node_uuid)
         if self.op.mode == constants.EXPORT_MODE_LOCAL:
           (fin_resu, dresults) = helper.LocalExport(self.dst_node,
                                                     self.op.compress)
@@ -495,6 +521,9 @@ class LUBackupExport(LogicalUnit):
                                                      key_name, dest_ca_pem,
                                                      self.op.compress,
                                                      timeouts)
+
+        if self.DoReboot() and not snapshots_available:
+          self.StartInstance(feedback_fn, src_node_uuid)
       finally:
         helper.Cleanup()
 
@@ -546,15 +575,8 @@ class LUBackupRemove(NoHooksLU):
       # we don't need to lock the instance itself, as nothing will happen to it
       # (and we can remove exports also for a removed instance)
       locking.LEVEL_NODE: locking.ALL_SET,
-
-      # Removing backups is quick, so blocking allocations is justified
-      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
       }
 
-    # Allocations should be stopped while this LU runs with node locks, but it
-    # doesn't have to be exclusive
-    self.share_locks[locking.LEVEL_NODE_ALLOC] = 1
-
   def Exec(self, feedback_fn):
     """Remove any export.
 
index 31f65d2..6307bb1 100644 (file)
@@ -189,7 +189,6 @@ class LogicalUnit(object): # pylint: disable=R0902
       locking.LEVEL_CLUSTER: (lambda: [locking.BGL]),
       locking.LEVEL_INSTANCE:
         lambda: self.cfg.GetInstanceNames(self.cfg.GetInstanceList()),
-      locking.LEVEL_NODE_ALLOC: (lambda: [locking.NAL]),
       locking.LEVEL_NODEGROUP: self.cfg.GetNodeGroupList,
       locking.LEVEL_NODE: self.cfg.GetNodeList,
       locking.LEVEL_NODE_RES: self.cfg.GetNodeList,
@@ -440,7 +439,7 @@ class LogicalUnit(object): # pylint: disable=R0902
     # pylint: disable=W0613,R0201
     return lu_result
 
-  def _ExpandAndLockInstance(self):
+  def _ExpandAndLockInstance(self, allow_forthcoming=False):
     """Helper function to expand and lock an instance.
 
     Many LUs that work on an instance take its name in self.op.instance_name
@@ -449,6 +448,10 @@ class LogicalUnit(object): # pylint: disable=R0902
     name. It also initializes needed_locks as a dict, if this hasn't been done
     before.
 
+    @param allow_forthcoming: if True, do not insist that the instance be real;
+        the default behaviour is to raise a prerequisite error if the specified
+        instance is forthcoming.
+
     """
     if self.needed_locks is None:
       self.needed_locks = {}
@@ -459,6 +462,10 @@ class LogicalUnit(object): # pylint: disable=R0902
       ExpandInstanceUuidAndName(self.cfg, self.op.instance_uuid,
                                 self.op.instance_name)
     self.needed_locks[locking.LEVEL_INSTANCE] = self.op.instance_name
+    if not allow_forthcoming:
+      if self.cfg.GetInstanceInfo(self.op.instance_uuid).forthcoming:
+        raise errors.OpPrereqError(
+          "forthcoming instances not supported for this operation")
 
   def _LockInstancesNodes(self, primary_only=False,
                           level=locking.LEVEL_NODE):
@@ -510,6 +517,12 @@ class LogicalUnit(object): # pylint: disable=R0902
 
     del self.recalculate_locks[level]
 
+  def AssertReleasedLocks(self, level):
+    """Raise AssertionError if the LU holds some locks of the given level.
+
+    """
+    assert not self.owned_locks(level)
+
 
 class NoHooksLU(LogicalUnit): # pylint: disable=W0223
   """Simple LU which runs no hooks.
diff --git a/lib/cmdlib/cluster/__init__.py b/lib/cmdlib/cluster/__init__.py
new file mode 100644 (file)
index 0000000..51474d6
--- /dev/null
@@ -0,0 +1,1802 @@
+#
+#
+
+# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Logical units dealing with the cluster."""
+
+import copy
+import itertools
+import logging
+import operator
+import os
+import re
+import time
+
+from ganeti import compat
+from ganeti import constants
+from ganeti import errors
+from ganeti import hypervisor
+from ganeti import locking
+from ganeti import masterd
+from ganeti import netutils
+from ganeti import objects
+from ganeti import opcodes
+from ganeti import pathutils
+from ganeti import query
+import ganeti.rpc.node as rpc
+from ganeti import runtime
+from ganeti import ssh
+from ganeti import uidpool
+from ganeti import utils
+from ganeti import vcluster
+
+from ganeti.cmdlib.base import NoHooksLU, QueryBase, LogicalUnit, \
+  ResultWithJobs
+from ganeti.cmdlib.common import ShareAll, RunPostHook, \
+  ComputeAncillaryFiles, RedistributeAncillaryFiles, UploadHelper, \
+  GetWantedInstances, MergeAndVerifyHvState, MergeAndVerifyDiskState, \
+  GetUpdatedIPolicy, ComputeNewInstanceViolations, GetUpdatedParams, \
+  CheckOSParams, CheckHVParams, AdjustCandidatePool, CheckNodePVs, \
+  ComputeIPolicyInstanceViolation, AnnotateDiskParams, SupportsOob, \
+  CheckIpolicyVsDiskTemplates, CheckDiskAccessModeValidity, \
+  CheckDiskAccessModeConsistency, GetClientCertDigest, \
+  AddInstanceCommunicationNetworkOp, ConnectInstanceCommunicationNetworkOp, \
+  CheckImageValidity, CheckDiskAccessModeConsistency, EnsureKvmdOnNodes
+
+import ganeti.masterd.instance
+
+
+class LUClusterRenewCrypto(NoHooksLU):
+  """Renew the cluster's crypto tokens.
+
+  """
+
+  _MAX_NUM_RETRIES = 3
+  REQ_BGL = False
+
+  def ExpandNames(self):
+    self.needed_locks = {
+      locking.LEVEL_NODE: locking.ALL_SET,
+    }
+    self.share_locks = ShareAll()
+    self.share_locks[locking.LEVEL_NODE] = 0
+
+  def CheckPrereq(self):
+    """Check prerequisites.
+
+    This checks whether the cluster is empty.
+
+    Any errors are signaled by raising errors.OpPrereqError.
+
+    """
+    self._ssh_renewal_suppressed = \
+      not self.cfg.GetClusterInfo().modify_ssh_setup and self.op.ssh_keys
+
+  def _RenewNodeSslCertificates(self, feedback_fn):
+    """Renews the nodes' SSL certificates.
+
+    Note that most of this operation is done in gnt_cluster.py, this LU only
+    takes care of the renewal of the client SSL certificates.
+
+    """
+    master_uuid = self.cfg.GetMasterNode()
+    cluster = self.cfg.GetClusterInfo()
+
+    logging.debug("Renewing the master's SSL node certificate."
+                  " Master's UUID: %s.", master_uuid)
+
+    # mapping node UUIDs to client certificate digests
+    digest_map = {}
+    master_digest = utils.GetCertificateDigest(
+        cert_filename=pathutils.NODED_CLIENT_CERT_FILE)
+    digest_map[master_uuid] = master_digest
+    logging.debug("Adding the master's SSL node certificate digest to the"
+                  " configuration. Master's UUID: %s, Digest: %s",
+                  master_uuid, master_digest)
+
+    node_errors = {}
+    nodes = self.cfg.GetAllNodesInfo()
+    logging.debug("Renewing non-master nodes' node certificates.")
+    for (node_uuid, node_info) in nodes.items():
+      if node_info.offline:
+        logging.info("* Skipping offline node %s", node_info.name)
+        continue
+      if node_uuid != master_uuid:
+        logging.debug("Adding certificate digest of node '%s'.", node_uuid)
+        last_exception = None
+        for i in range(self._MAX_NUM_RETRIES):
+          try:
+            if node_info.master_candidate:
+              node_digest = GetClientCertDigest(self, node_uuid)
+              digest_map[node_uuid] = node_digest
+              logging.debug("Added the node's certificate to candidate"
+                            " certificate list. Current list: %s.",
+                            str(cluster.candidate_certs))
+            break
+          except errors.OpExecError as e:
+            last_exception = e
+            logging.error("Could not fetch a non-master node's SSL node"
+                          " certificate at attempt no. %s. The node's UUID"
+                          " is %s, and the error was: %s.",
+                          str(i), node_uuid, e)
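+        # Note: this 'else' belongs to the 'for' loop; it only runs if no
+        # attempt succeeded (i.e. the loop finished without a 'break').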
+        else:
+          if last_exception:
+            node_errors[node_uuid] = last_exception
+
+    if node_errors:
+      msg = ("Some nodes' SSL client certificates could not be fetched."
+             " Please make sure those nodes are reachable and rerun"
+             " the operation. The affected nodes and their errors are:\n")
+      for uuid, e in node_errors.items():
+        msg += "Node %s: %s\n" % (uuid, e)
+      feedback_fn(msg)
+
+    self.cfg.SetCandidateCerts(digest_map)
+
+  def _RenewSshKeys(self):
+    """Renew all nodes' SSH keys.
+
+    """
+    master_uuid = self.cfg.GetMasterNode()
+
+    nodes = self.cfg.GetAllNodesInfo()
+    nodes_uuid_names = [(node_uuid, node_info.name) for (node_uuid, node_info)
+                        in nodes.items() if not node_info.offline]
+    node_names = [name for (_, name) in nodes_uuid_names]
+    node_uuids = [uuid for (uuid, _) in nodes_uuid_names]
+    potential_master_candidates = self.cfg.GetPotentialMasterCandidates()
+    master_candidate_uuids = self.cfg.GetMasterCandidateUuids()
+    result = self.rpc.call_node_ssh_keys_renew(
+      [master_uuid],
+      node_uuids, node_names,
+      master_candidate_uuids,
+      potential_master_candidates)
+    result[master_uuid].Raise("Could not renew the SSH keys of all nodes")
+
+  def Exec(self, feedback_fn):
+    if self.op.node_certificates:
+      feedback_fn("Renewing Node SSL certificates")
+      self._RenewNodeSslCertificates(feedback_fn)
+    if self.op.ssh_keys and not self._ssh_renewal_suppressed:
+      feedback_fn("Renewing SSH keys")
+      self._RenewSshKeys()
+    elif self._ssh_renewal_suppressed:
+      feedback_fn("Cannot renew SSH keys if the cluster is configured to not"
+                  " modify the SSH setup.")
+
+
+class LUClusterActivateMasterIp(NoHooksLU):
+  """Activate the master IP on the master node.
+
+  """
+  def Exec(self, feedback_fn):
+    """Activate the master IP.
+
+    """
+    master_params = self.cfg.GetMasterNetworkParameters()
+    ems = self.cfg.GetUseExternalMipScript()
+    result = self.rpc.call_node_activate_master_ip(master_params.uuid,
+                                                   master_params, ems)
+    result.Raise("Could not activate the master IP")
+
+
+class LUClusterDeactivateMasterIp(NoHooksLU):
+  """Deactivate the master IP on the master node.
+
+  """
+  def Exec(self, feedback_fn):
+    """Deactivate the master IP.
+
+    """
+    master_params = self.cfg.GetMasterNetworkParameters()
+    ems = self.cfg.GetUseExternalMipScript()
+    result = self.rpc.call_node_deactivate_master_ip(master_params.uuid,
+                                                     master_params, ems)
+    result.Raise("Could not deactivate the master IP")
+
+
+class LUClusterConfigQuery(NoHooksLU):
+  """Return configuration values.
+
+  """
+  REQ_BGL = False
+
+  def CheckArguments(self):
+    self.cq = ClusterQuery(None, self.op.output_fields, False)
+
+  def ExpandNames(self):
+    self.cq.ExpandNames(self)
+
+  def DeclareLocks(self, level):
+    self.cq.DeclareLocks(self, level)
+
+  def Exec(self, feedback_fn):
+    result = self.cq.OldStyleQuery(self)
+
+    assert len(result) == 1
+
+    return result[0]
+
+
+class LUClusterDestroy(LogicalUnit):
+  """Logical unit for destroying the cluster.
+
+  """
+  HPATH = "cluster-destroy"
+  HTYPE = constants.HTYPE_CLUSTER
+
+  # Read by the job queue to detect when the cluster is gone and job files will
+  # never be available.
+  # FIXME: This variable should be removed together with the Python job queue.
+  clusterHasBeenDestroyed = False
+
+  def BuildHooksEnv(self):
+    """Build hooks env.
+
+    """
+    return {
+      "OP_TARGET": self.cfg.GetClusterName(),
+      }
+
+  def BuildHooksNodes(self):
+    """Build hooks nodes.
+
+    """
+    return ([], [])
+
+  def CheckPrereq(self):
+    """Check prerequisites.
+
+    This checks whether the cluster is empty.
+
+    Any errors are signaled by raising errors.OpPrereqError.
+
+    """
+    master = self.cfg.GetMasterNode()
+
+    nodelist = self.cfg.GetNodeList()
+    if len(nodelist) != 1 or nodelist[0] != master:
+      raise errors.OpPrereqError("There are still %d node(s) in"
+                                 " this cluster." % (len(nodelist) - 1),
+                                 errors.ECODE_INVAL)
+    instancelist = self.cfg.GetInstanceList()
+    if instancelist:
+      raise errors.OpPrereqError("There are still %d instance(s) in"
+                                 " this cluster." % len(instancelist),
+                                 errors.ECODE_INVAL)
+
+  def Exec(self, feedback_fn):
+    """Destroys the cluster.
+
+    """
+    master_params = self.cfg.GetMasterNetworkParameters()
+
+    # Run post hooks on master node before it's removed
+    RunPostHook(self, self.cfg.GetNodeName(master_params.uuid))
+
+    ems = self.cfg.GetUseExternalMipScript()
+    result = self.rpc.call_node_deactivate_master_ip(master_params.uuid,
+                                                     master_params, ems)
+    result.Warn("Error disabling the master IP address", self.LogWarning)
+
+    self.wconfd.Client().PrepareClusterDestruction(self.wconfdcontext)
+
+    # signal to the job queue that the cluster is gone
+    LUClusterDestroy.clusterHasBeenDestroyed = True
+
+    return master_params.uuid
+
+
+class LUClusterPostInit(LogicalUnit):
+  """Logical unit for running hooks after cluster initialization.
+
+  """
+  HPATH = "cluster-init"
+  HTYPE = constants.HTYPE_CLUSTER
+
+  def CheckArguments(self):
+    self.master_uuid = self.cfg.GetMasterNode()
+    self.master_ndparams = self.cfg.GetNdParams(self.cfg.GetMasterNodeInfo())
+
+    # TODO: When Issue 584 is solved, and None is properly parsed when used
+    # as a default value, ndparams.get(.., None) can be changed to
+    # ndparams[..] to access the values directly
+
+    # OpenvSwitch: Warn user if link is missing
+    if (self.master_ndparams[constants.ND_OVS] and not
+        self.master_ndparams.get(constants.ND_OVS_LINK, None)):
+      self.LogInfo("No physical interface for OpenvSwitch was given."
+                   " OpenvSwitch will not have an outside connection. This"
+                   " might not be what you want.")
+
+  def BuildHooksEnv(self):
+    """Build hooks env.
+
+    """
+    return {
+      "OP_TARGET": self.cfg.GetClusterName(),
+      }
+
+  def BuildHooksNodes(self):
+    """Build hooks nodes.
+
+    """
+    return ([], [self.cfg.GetMasterNode()])
+
+  def Exec(self, feedback_fn):
+    """Create and configure Open vSwitch
+
+    """
+    if self.master_ndparams[constants.ND_OVS]:
+      result = self.rpc.call_node_configure_ovs(
+                 self.master_uuid,
+                 self.master_ndparams[constants.ND_OVS_NAME],
+                 self.master_ndparams.get(constants.ND_OVS_LINK, None))
+      result.Raise("Could not successully configure Open vSwitch")
+
+    return True
+
+
+class ClusterQuery(QueryBase):
+  FIELDS = query.CLUSTER_FIELDS
+
+  #: Do not sort (there is only one item)
+  SORT_FIELD = None
+
+  def ExpandNames(self, lu):
+    lu.needed_locks = {}
+
+    # The following variables interact with _QueryBase._GetNames
+    self.wanted = locking.ALL_SET
+    self.do_locking = self.use_locking
+
+    if self.do_locking:
+      raise errors.OpPrereqError("Can not use locking for cluster queries",
+                                 errors.ECODE_INVAL)
+
+  def DeclareLocks(self, lu, level):
+    pass
+
+  def _GetQueryData(self, lu):
+    """Computes the list of nodes and their attributes.
+
+    """
+    if query.CQ_CONFIG in self.requested_data:
+      cluster = lu.cfg.GetClusterInfo()
+      nodes = lu.cfg.GetAllNodesInfo()
+    else:
+      cluster = NotImplemented
+      nodes = NotImplemented
+
+    if query.CQ_QUEUE_DRAINED in self.requested_data:
+      drain_flag = os.path.exists(pathutils.JOB_QUEUE_DRAIN_FILE)
+    else:
+      drain_flag = NotImplemented
+
+    if query.CQ_WATCHER_PAUSE in self.requested_data:
+      master_node_uuid = lu.cfg.GetMasterNode()
+
+      result = lu.rpc.call_get_watcher_pause(master_node_uuid)
+      result.Raise("Can't retrieve watcher pause from master node '%s'" %
+                   lu.cfg.GetMasterNodeName())
+
+      watcher_pause = result.payload
+    else:
+      watcher_pause = NotImplemented
+
+    return query.ClusterQueryData(cluster, nodes, drain_flag, watcher_pause)
+
+
+class LUClusterQuery(NoHooksLU):
+  """Query cluster configuration.
+
+  """
+  REQ_BGL = False
+
+  def ExpandNames(self):
+    self.needed_locks = {}
+
+  def Exec(self, feedback_fn):
+    """Return cluster config.
+
+    """
+    cluster = self.cfg.GetClusterInfo()
+    os_hvp = {}
+
+    # Filter just for enabled hypervisors
+    for os_name, hv_dict in cluster.os_hvp.items():
+      os_hvp[os_name] = {}
+      for hv_name, hv_params in hv_dict.items():
+        if hv_name in cluster.enabled_hypervisors:
+          os_hvp[os_name][hv_name] = hv_params
+
+    # Convert ip_family to ip_version
+    primary_ip_version = constants.IP4_VERSION
+    if cluster.primary_ip_family == netutils.IP6Address.family:
+      primary_ip_version = constants.IP6_VERSION
+
+    result = {
+      "software_version": constants.RELEASE_VERSION,
+      "protocol_version": constants.PROTOCOL_VERSION,
+      "config_version": constants.CONFIG_VERSION,
+      "os_api_version": max(constants.OS_API_VERSIONS),
+      "export_version": constants.EXPORT_VERSION,
+      "vcs_version": constants.VCS_VERSION,
+      "architecture": runtime.GetArchInfo(),
+      "name": cluster.cluster_name,
+      "master": self.cfg.GetMasterNodeName(),
+      "default_hypervisor": cluster.primary_hypervisor,
+      "enabled_hypervisors": cluster.enabled_hypervisors,
+      "hvparams": dict([(hypervisor_name, cluster.hvparams[hypervisor_name])
+                        for hypervisor_name in cluster.enabled_hypervisors]),
+      "os_hvp": os_hvp,
+      "beparams": cluster.beparams,
+      "osparams": cluster.osparams,
+      "ipolicy": cluster.ipolicy,
+      "nicparams": cluster.nicparams,
+      "ndparams": cluster.ndparams,
+      "diskparams": cluster.diskparams,
+      "candidate_pool_size": cluster.candidate_pool_size,
+      "max_running_jobs": cluster.max_running_jobs,
+      "max_tracked_jobs": cluster.max_tracked_jobs,
+      "mac_prefix": cluster.mac_prefix,
+      "master_netdev": cluster.master_netdev,
+      "master_netmask": cluster.master_netmask,
+      "use_external_mip_script": cluster.use_external_mip_script,
+      "volume_group_name": cluster.volume_group_name,
+      "drbd_usermode_helper": cluster.drbd_usermode_helper,
+      "file_storage_dir": cluster.file_storage_dir,
+      "shared_file_storage_dir": cluster.shared_file_storage_dir,
+      "maintain_node_health": cluster.maintain_node_health,
+      "ctime": cluster.ctime,
+      "mtime": cluster.mtime,
+      "uuid": cluster.uuid,
+      "tags": list(cluster.GetTags()),
+      "uid_pool": cluster.uid_pool,
+      "default_iallocator": cluster.default_iallocator,
+      "default_iallocator_params": cluster.default_iallocator_params,
+      "reserved_lvs": cluster.reserved_lvs,
+      "primary_ip_version": primary_ip_version,
+      "prealloc_wipe_disks": cluster.prealloc_wipe_disks,
+      "hidden_os": cluster.hidden_os,
+      "blacklisted_os": cluster.blacklisted_os,
+      "enabled_disk_templates": cluster.enabled_disk_templates,
+      "install_image": cluster.install_image,
+      "instance_communication_network": cluster.instance_communication_network,
+      "compression_tools": cluster.compression_tools,
+      "enabled_user_shutdown": cluster.enabled_user_shutdown,
+      }
+
+    return result
+
+
+class LUClusterRedistConf(NoHooksLU):
+  """Force the redistribution of cluster configuration.
+
+  This is a very simple LU.
+
+  """
+  REQ_BGL = False
+
+  def ExpandNames(self):
+    self.needed_locks = {
+      locking.LEVEL_NODE: locking.ALL_SET,
+    }
+    self.share_locks = ShareAll()
+
+  def Exec(self, feedback_fn):
+    """Redistribute the configuration.
+
+    """
+    self.cfg.Update(self.cfg.GetClusterInfo(), feedback_fn)
+    RedistributeAncillaryFiles(self)
+
+
+class LUClusterRename(LogicalUnit):
+  """Rename the cluster.
+
+  """
+  HPATH = "cluster-rename"
+  HTYPE = constants.HTYPE_CLUSTER
+
+  def BuildHooksEnv(self):
+    """Build hooks env.
+
+    """
+    return {
+      "OP_TARGET": self.cfg.GetClusterName(),
+      "NEW_NAME": self.op.name,
+      }
+
+  def BuildHooksNodes(self):
+    """Build hooks nodes.
+
+    """
+    return ([self.cfg.GetMasterNode()], self.cfg.GetNodeList())
+
+  def CheckPrereq(self):
+    """Verify that the passed name is a valid one.
+
+    """
+    hostname = netutils.GetHostname(name=self.op.name,
+                                    family=self.cfg.GetPrimaryIPFamily())
+
+    new_name = hostname.name
+    self.ip = new_ip = hostname.ip
+    old_name = self.cfg.GetClusterName()
+    old_ip = self.cfg.GetMasterIP()
+    if new_name == old_name and new_ip == old_ip:
+      raise errors.OpPrereqError("Neither the name nor the IP address of the"
+                                 " cluster has changed",
+                                 errors.ECODE_INVAL)
+    if new_ip != old_ip:
+      if netutils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
+        raise errors.OpPrereqError("The given cluster IP address (%s) is"
+                                   " reachable on the network" %
+                                   new_ip, errors.ECODE_NOTUNIQUE)
+
+    self.op.name = new_name
+
+  def Exec(self, feedback_fn):
+    """Rename the cluster.
+
+    """
+    clustername = self.op.name
+    new_ip = self.ip
+
+    # shutdown the master IP
+    master_params = self.cfg.GetMasterNetworkParameters()
+    ems = self.cfg.GetUseExternalMipScript()
+    result = self.rpc.call_node_deactivate_master_ip(master_params.uuid,
+                                                     master_params, ems)
+    result.Raise("Could not disable the master role")
+
+    try:
+      cluster = self.cfg.GetClusterInfo()
+      cluster.cluster_name = clustername
+      cluster.master_ip = new_ip
+      self.cfg.Update(cluster, feedback_fn)
+
+      # update the known hosts file
+      ssh.WriteKnownHostsFile(self.cfg, pathutils.SSH_KNOWN_HOSTS_FILE)
+      node_list = self.cfg.GetOnlineNodeList()
+      try:
+        node_list.remove(master_params.uuid)
+      except ValueError:
+        pass
+      UploadHelper(self, node_list, pathutils.SSH_KNOWN_HOSTS_FILE)
+    finally:
+      master_params.ip = new_ip
+      result = self.rpc.call_node_activate_master_ip(master_params.uuid,
+                                                     master_params, ems)
+      result.Warn("Could not re-enable the master role on the master,"
+                  " please restart manually", self.LogWarning)
+
+    return clustername
+
+
+class LUClusterRepairDiskSizes(NoHooksLU):
+  """Verifies the cluster disks sizes.
+
+  """
+  REQ_BGL = False
+
+  def ExpandNames(self):
+    if self.op.instances:
+      (_, self.wanted_names) = GetWantedInstances(self, self.op.instances)
+      # Not getting the node allocation lock as only a specific set of
+      # instances (and their nodes) is going to be acquired
+      self.needed_locks = {
+        locking.LEVEL_NODE_RES: [],
+        locking.LEVEL_INSTANCE: self.wanted_names,
+        }
+      self.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE
+    else:
+      self.wanted_names = None
+      self.needed_locks = {
+        locking.LEVEL_NODE_RES: locking.ALL_SET,
+        locking.LEVEL_INSTANCE: locking.ALL_SET,
+        }
+
+    self.share_locks = {
+      locking.LEVEL_NODE_RES: 1,
+      locking.LEVEL_INSTANCE: 0,
+      }
+
+  def DeclareLocks(self, level):
+    if level == locking.LEVEL_NODE_RES and self.wanted_names is not None:
+      self._LockInstancesNodes(primary_only=True, level=level)
+
+  def CheckPrereq(self):
+    """Check prerequisites.
+
+    This only checks the optional instance list against the existing names.
+
+    """
+    if self.wanted_names is None:
+      self.wanted_names = self.owned_locks(locking.LEVEL_INSTANCE)
+
+    self.wanted_instances = \
+        map(compat.snd, self.cfg.GetMultiInstanceInfoByName(self.wanted_names))
+
+  def _EnsureChildSizes(self, disk):
+    """Ensure children of the disk have the needed disk size.
+
+    This is valid mainly for DRBD8 and fixes an issue where the
+    children have a smaller disk size.
+
+    @param disk: an L{ganeti.objects.Disk} object
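+
+    For illustration, assume a DRBD8 disk recorded at 1024 MiB whose
+    data child was recorded at 1000 MiB: the child is grown to 1024 MiB
+    in place and C{True} is returned, so callers typically follow up
+    with::
+
+        if self._EnsureChildSizes(disk):
+          self.cfg.Update(disk, feedback_fn)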
+
+    """
+    if disk.dev_type == constants.DT_DRBD8:
+      assert disk.children, "Empty children for DRBD8?"
+      fchild = disk.children[0]
+      mismatch = fchild.size < disk.size
+      if mismatch:
+        self.LogInfo("Child disk has size %d, parent %d, fixing",
+                     fchild.size, disk.size)
+        fchild.size = disk.size
+
+      # and we recurse on this child only, not on the metadev
+      return self._EnsureChildSizes(fchild) or mismatch
+    else:
+      return False
+
+  def Exec(self, feedback_fn):
+    """Verify the size of cluster disks.
+
+    """
+    # TODO: check child disks too
+    # TODO: check differences in size between primary/secondary nodes
+    per_node_disks = {}
+    for instance in self.wanted_instances:
+      pnode = instance.primary_node
+      if pnode not in per_node_disks:
+        per_node_disks[pnode] = []
+      for idx, disk in enumerate(self.cfg.GetInstanceDisks(instance.uuid)):
+        per_node_disks[pnode].append((instance, idx, disk))
+
+    assert not (frozenset(per_node_disks.keys()) -
+                frozenset(self.owned_locks(locking.LEVEL_NODE_RES))), \
+      "Not owning correct locks"
+    assert not self.owned_locks(locking.LEVEL_NODE)
+
+    es_flags = rpc.GetExclusiveStorageForNodes(self.cfg,
+                                               per_node_disks.keys())
+
+    changed = []
+    for node_uuid, dskl in per_node_disks.items():
+      if not dskl:
+        # no disks on the node
+        continue
+
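+      # pair each disk (copied, as a single-element list) with its owning
+      # instance for the blockdev_getdimensions RPC below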
+      newl = [([v[2].Copy()], v[0]) for v in dskl]
+      node_name = self.cfg.GetNodeName(node_uuid)
+      result = self.rpc.call_blockdev_getdimensions(node_uuid, newl)
+      if result.fail_msg:
+        self.LogWarning("Failure in blockdev_getdimensions call to node"
+                        " %s, ignoring", node_name)
+        continue
+      if len(result.payload) != len(dskl):
+        logging.warning("Invalid result from node %s: len(dksl)=%d,"
+                        " result.payload=%s", node_name, len(dskl),
+                        result.payload)
+        self.LogWarning("Invalid result from node %s, ignoring node results",
+                        node_name)
+        continue
+      for ((instance, idx, disk), dimensions) in zip(dskl, result.payload):
+        if dimensions is None:
+          self.LogWarning("Disk %d of instance %s did not return size"
+                          " information, ignoring", idx, instance.name)
+          continue
+        if not isinstance(dimensions, (tuple, list)):
+          self.LogWarning("Disk %d of instance %s did not return valid"
+                          " dimension information, ignoring", idx,
+                          instance.name)
+          continue
+        (size, spindles) = dimensions
+        if not isinstance(size, (int, long)):
+          self.LogWarning("Disk %d of instance %s did not return valid"
+                          " size information, ignoring", idx, instance.name)
+          continue
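+        # the RPC reports the size in bytes, while disk.size is kept in
+        # mebibytes; shifting by 20 bits converts between the two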
+        size = size >> 20
+        if size != disk.size:
+          self.LogInfo("Disk %d of instance %s has mismatched size,"
+                       " correcting: recorded %d, actual %d", idx,
+                       instance.name, disk.size, size)
+          disk.size = size
+          self.cfg.Update(disk, feedback_fn)
+          changed.append((instance.name, idx, "size", size))
+        if es_flags[node_uuid]:
+          if spindles is None:
+            self.LogWarning("Disk %d of instance %s did not return valid"
+                            " spindles information, ignoring", idx,
+                            instance.name)
+          elif disk.spindles is None or disk.spindles != spindles:
+            self.LogInfo("Disk %d of instance %s has mismatched spindles,"
+                         " correcting: recorded %s, actual %s",
+                         idx, instance.name, disk.spindles, spindles)
+            disk.spindles = spindles
+            self.cfg.Update(disk, feedback_fn)
+            changed.append((instance.name, idx, "spindles", disk.spindles))
+        if self._EnsureChildSizes(disk):
+          self.cfg.Update(disk, feedback_fn)
+          changed.append((instance.name, idx, "size", disk.size))
+    return changed
+
+
+def _ValidateNetmask(cfg, netmask):
+  """Checks if a netmask is valid.
+
+  @type cfg: L{config.ConfigWriter}
+  @param cfg: cluster configuration
+  @type netmask: int
+  @param netmask: netmask to be verified
+  @raise errors.OpPrereqError: if the validation fails
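+
+  Example (illustrative): on a cluster whose primary IP family is IPv4,
+  C{_ValidateNetmask(cfg, 24)} passes silently, while
+  C{_ValidateNetmask(cfg, 64)} raises L{errors.OpPrereqError}, as 64
+  exceeds the 32 bits of an IPv4 address.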
+
+  """
+  ip_family = cfg.GetPrimaryIPFamily()
+  try:
+    ipcls = netutils.IPAddress.GetClassFromIpFamily(ip_family)
+  except errors.ProgrammerError:
+    raise errors.OpPrereqError("Invalid primary ip family: %s." %
+                               ip_family, errors.ECODE_INVAL)
+  if not ipcls.ValidateNetmask(netmask):
+    raise errors.OpPrereqError("CIDR netmask (%s) not valid" %
+                               (netmask), errors.ECODE_INVAL)
+
+
+def CheckFileBasedStoragePathVsEnabledDiskTemplates(
+    logging_warn_fn, file_storage_dir, enabled_disk_templates,
+    file_disk_template):
+  """Checks whether the given file-based storage directory is acceptable.
+
+  Note: This function is public, because it is also used in bootstrap.py.
+
+  @type logging_warn_fn: function
+  @param logging_warn_fn: function which accepts a string and logs it
+  @type file_storage_dir: string
+  @param file_storage_dir: the directory to be used for file-based instances
+  @type enabled_disk_templates: list of string
+  @param enabled_disk_templates: the list of enabled disk templates
+  @type file_disk_template: string
+  @param file_disk_template: the file-based disk template for which the
+      path should be checked
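+
+  In short, the cases handled below are:
+    - directory unset (empty string) while the template is enabled: the
+      operation is rejected
+    - directory set while the template is disabled: only a warning is
+      emitted
+    - directory C{None}: a programmer error, as callers must always
+      pass a value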
+
+  """
+  assert (file_disk_template in utils.storage.GetDiskTemplatesOfStorageTypes(
+            constants.ST_FILE, constants.ST_SHARED_FILE, constants.ST_GLUSTER
+         ))
+
+  file_storage_enabled = file_disk_template in enabled_disk_templates
+  if file_storage_dir is not None:
+    if file_storage_dir == "":
+      if file_storage_enabled:
+        raise errors.OpPrereqError(
+            "Unsetting the '%s' storage directory while having '%s' storage"
+            " enabled is not permitted." %
+            (file_disk_template, file_disk_template),
+            errors.ECODE_INVAL)
+    else:
+      if not file_storage_enabled:
+        logging_warn_fn(
+            "Specified a %s storage directory, although %s storage is not"
+            " enabled." % (file_disk_template, file_disk_template))
+  else:
+    raise errors.ProgrammerError("Received %s storage dir with value"
+                                 " 'None'." % file_disk_template)
+
+
+def CheckFileStoragePathVsEnabledDiskTemplates(
+    logging_warn_fn, file_storage_dir, enabled_disk_templates):
+  """Checks whether the given file storage directory is acceptable.
+
+  @see: C{CheckFileBasedStoragePathVsEnabledDiskTemplates}
+
+  """
+  CheckFileBasedStoragePathVsEnabledDiskTemplates(
+      logging_warn_fn, file_storage_dir, enabled_disk_templates,
+      constants.DT_FILE)
+
+
+def CheckSharedFileStoragePathVsEnabledDiskTemplates(
+    logging_warn_fn, file_storage_dir, enabled_disk_templates):
+  """Checks whether the given shared file storage directory is acceptable.
+
+  @see: C{CheckFileBasedStoragePathVsEnabledDiskTemplates}
+
+  """
+  CheckFileBasedStoragePathVsEnabledDiskTemplates(
+      logging_warn_fn, file_storage_dir, enabled_disk_templates,
+      constants.DT_SHARED_FILE)
+
+
+def CheckGlusterStoragePathVsEnabledDiskTemplates(
+    logging_warn_fn, file_storage_dir, enabled_disk_templates):
+  """Checks whether the given gluster storage directory is acceptable.
+
+  @see: C{CheckFileBasedStoragePathVsEnabledDiskTemplates}
+
+  """
+  CheckFileBasedStoragePathVsEnabledDiskTemplates(
+      logging_warn_fn, file_storage_dir, enabled_disk_templates,
+      constants.DT_GLUSTER)
+
+
+def CheckCompressionTools(tools):
+  """Check whether the provided compression tools look like executables.
+
+  @type tools: list of string
+  @param tools: The tools provided as opcode input
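+
+  Example (illustrative): C{["gzip", "lzop"]} is accepted;
+  C{["gzip", "my tool"]} is rejected because of the space; C{["lzop"]}
+  is rejected because gzip must always be present for compatibility.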
+
+  """
+  regex = re.compile('^[-_a-zA-Z0-9]+$')
+  illegal_tools = [t for t in tools if not regex.match(t)]
+
+  if illegal_tools:
+    raise errors.OpPrereqError(
+      "The tools '%s' contain illegal characters: only alphanumeric values,"
+      " dashes, and underscores are allowed" % ", ".join(illegal_tools),
+      errors.ECODE_INVAL
+    )
+
+  if constants.IEC_GZIP not in tools:
+    raise errors.OpPrereqError("For compatibility reasons, the %s utility must"
+                               " be present among the compression tools" %
+                               constants.IEC_GZIP, errors.ECODE_INVAL)
+
+  if constants.IEC_NONE in tools:
+    raise errors.OpPrereqError("%s is a reserved value used for no compression,"
+                               " and cannot be used as the name of a tool" %
+                               constants.IEC_NONE, errors.ECODE_INVAL)
+
+
+class LUClusterSetParams(LogicalUnit):
+  """Change the parameters of the cluster.
+
+  """
+  HPATH = "cluster-modify"
+  HTYPE = constants.HTYPE_CLUSTER
+  REQ_BGL = False
+
+  def CheckArguments(self):
+    """Check parameters
+
+    """
+    if self.op.uid_pool:
+      uidpool.CheckUidPool(self.op.uid_pool)
+
+    if self.op.add_uids:
+      uidpool.CheckUidPool(self.op.add_uids)
+
+    if self.op.remove_uids:
+      uidpool.CheckUidPool(self.op.remove_uids)
+
+    if self.op.mac_prefix:
+      self.op.mac_prefix = \
+          utils.NormalizeAndValidateThreeOctetMacPrefix(self.op.mac_prefix)
+
+    if self.op.master_netmask is not None:
+      _ValidateNetmask(self.cfg, self.op.master_netmask)
+
+    if self.op.diskparams:
+      for dt_params in self.op.diskparams.values():
+        utils.ForceDictType(dt_params, constants.DISK_DT_TYPES)
+      try:
+        utils.VerifyDictOptions(self.op.diskparams, constants.DISK_DT_DEFAULTS)
+        CheckDiskAccessModeValidity(self.op.diskparams)
+      except errors.OpPrereqError, err:
+        raise errors.OpPrereqError("While verify diskparams options: %s" % err,
+                                   errors.ECODE_INVAL)
+
+    if self.op.install_image is not None:
+      CheckImageValidity(self.op.install_image,
+                         "Install image must be an absolute path or a URL")
+
+  def ExpandNames(self):
+    # FIXME: in the future maybe other cluster params won't require checking on
+    # all nodes to be modified.
+    # FIXME: This opcode changes cluster-wide settings. Is acquiring all
+    # resource locks the right thing, shouldn't it be the BGL instead?
+    self.needed_locks = {
+      locking.LEVEL_NODE: locking.ALL_SET,
+      locking.LEVEL_INSTANCE: locking.ALL_SET,
+      locking.LEVEL_NODEGROUP: locking.ALL_SET,
+    }
+    self.share_locks = ShareAll()
+
+  def BuildHooksEnv(self):
+    """Build hooks env.
+
+    """
+    return {
+      "OP_TARGET": self.cfg.GetClusterName(),
+      "NEW_VG_NAME": self.op.vg_name,
+      }
+
+  def BuildHooksNodes(self):
+    """Build hooks nodes.
+
+    """
+    mn = self.cfg.GetMasterNode()
+    return ([mn], [mn])
+
+  def _CheckVgName(self, node_uuids, enabled_disk_templates,
+                   new_enabled_disk_templates):
+    """Check the consistency of the vg name on all nodes and in case it gets
+       unset whether there are instances still using it.
+
+    """
+    lvm_is_enabled = utils.IsLvmEnabled(enabled_disk_templates)
+    lvm_gets_enabled = utils.LvmGetsEnabled(enabled_disk_templates,
+                                            new_enabled_disk_templates)
+    current_vg_name = self.cfg.GetVGName()
+
+    if self.op.vg_name == '':
+      if lvm_is_enabled:
+        raise errors.OpPrereqError("Cannot unset volume group if lvm-based"
+                                   " disk templates are or get enabled.",
+                                   errors.ECODE_INVAL)
+
+    if self.op.vg_name is None:
+      if current_vg_name is None and lvm_is_enabled:
+        raise errors.OpPrereqError("Please specify a volume group when"
+                                   " enabling lvm-based disk-templates.",
+                                   errors.ECODE_INVAL)
+
+    if self.op.vg_name is not None and not self.op.vg_name:
+      if self.cfg.DisksOfType(constants.DT_PLAIN):
+        raise errors.OpPrereqError("Cannot disable lvm storage while lvm-based"
+                                   " instances exist", errors.ECODE_INVAL)
+
+    if (self.op.vg_name is not None and lvm_is_enabled) or \
+        (self.cfg.GetVGName() is not None and lvm_gets_enabled):
+      self._CheckVgNameOnNodes(node_uuids)
+
+  def _CheckVgNameOnNodes(self, node_uuids):
+    """Check the status of the volume group on each node.
+
+    """
+    vglist = self.rpc.call_vg_list(node_uuids)
+    for node_uuid in node_uuids:
+      msg = vglist[node_uuid].fail_msg
+      if msg:
+        # ignoring down node
+        self.LogWarning("Error while gathering data on node %s"
+                        " (ignoring node): %s",
+                        self.cfg.GetNodeName(node_uuid), msg)
+        continue
+      vgstatus = utils.CheckVolumeGroupSize(vglist[node_uuid].payload,
+                                            self.op.vg_name,
+                                            constants.MIN_VG_SIZE)
+      if vgstatus:
+        raise errors.OpPrereqError("Error on node '%s': %s" %
+                                   (self.cfg.GetNodeName(node_uuid), vgstatus),
+                                   errors.ECODE_ENVIRON)
+
+  @staticmethod
+  def _GetDiskTemplateSetsInner(op_enabled_disk_templates,
+                                old_enabled_disk_templates):
+    """Computes three sets of disk templates.
+
+    @see: C{_GetDiskTemplateSets} for more details.
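+
+    Example (illustrative): with C{op_enabled_disk_templates} set to
+    C{["plain", "file"]} and C{old_enabled_disk_templates} set to
+    C{["plain", "drbd"]}, the result is C{(["plain", "file"], ["file"],
+    ["drbd"])}, modulo list order, since sets are used internally.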
+
+    """
+    enabled_disk_templates = None
+    new_enabled_disk_templates = []
+    disabled_disk_templates = []
+    if op_enabled_disk_templates:
+      enabled_disk_templates = op_enabled_disk_templates
+      new_enabled_disk_templates = \
+        list(set(enabled_disk_templates)
+             - set(old_enabled_disk_templates))
+      disabled_disk_templates = \
+        list(set(old_enabled_disk_templates)
+             - set(enabled_disk_templates))
+    else:
+      enabled_disk_templates = old_enabled_disk_templates
+    return (enabled_disk_templates, new_enabled_disk_templates,
+            disabled_disk_templates)
+
+  def _GetDiskTemplateSets(self, cluster):
+    """Computes three sets of disk templates.
+
+    The three sets are:
+      - disk templates that will be enabled after this operation (no matter if
+        they were enabled before or not)
+      - disk templates that get enabled by this operation (thus haven't been
+        enabled before)
+      - disk templates that get disabled by this operation
+
+    """
+    return self._GetDiskTemplateSetsInner(self.op.enabled_disk_templates,
+                                          cluster.enabled_disk_templates)
+
+  def _CheckIpolicy(self, cluster, enabled_disk_templates):
+    """Checks the ipolicy.
+
+    @type cluster: C{objects.Cluster}
+    @param cluster: the cluster's configuration
+    @type enabled_disk_templates: list of string
+    @param enabled_disk_templates: list of (possibly newly) enabled disk
+      templates
+
+    """
+    # FIXME: write unit tests for this
+    if self.op.ipolicy:
+      self.new_ipolicy = GetUpdatedIPolicy(cluster.ipolicy, self.op.ipolicy,
+                                           group_policy=False)
+
+      CheckIpolicyVsDiskTemplates(self.new_ipolicy,
+                                  enabled_disk_templates)
+
+      all_instances = self.cfg.GetAllInstancesInfo().values()
+      violations = set()
+      for group in self.cfg.GetAllNodeGroupsInfo().values():
+        instances = frozenset(
+          [inst for inst in all_instances
+           if compat.any(nuuid in group.members
+           for nuuid in self.cfg.GetInstanceNodes(inst.uuid))])
+        new_ipolicy = objects.FillIPolicy(self.new_ipolicy, group.ipolicy)
+        ipol = masterd.instance.CalculateGroupIPolicy(cluster, group)
+        new = ComputeNewInstanceViolations(ipol, new_ipolicy, instances,
+                                           self.cfg)
+        if new:
+          violations.update(new)
+
+      if violations:
+        self.LogWarning("After the ipolicy change the following instances"
+                        " violate them: %s",
+                        utils.CommaJoin(utils.NiceSort(violations)))
+    else:
+      CheckIpolicyVsDiskTemplates(cluster.ipolicy,
+                                  enabled_disk_templates)
+
+  def _CheckDrbdHelperOnNodes(self, drbd_helper, node_uuids):
+    """Checks whether the set DRBD helper actually exists on the nodes.
+
+    @type drbd_helper: string
+    @param drbd_helper: path of the drbd usermode helper binary
+    @type node_uuids: list of strings
+    @param node_uuids: list of node UUIDs to check for the helper
+
+    """
+    # checks given drbd helper on all nodes
+    helpers = self.rpc.call_drbd_helper(node_uuids)
+    for (_, ninfo) in self.cfg.GetMultiNodeInfo(node_uuids):
+      if ninfo.offline:
+        self.LogInfo("Not checking drbd helper on offline node %s",
+                     ninfo.name)
+        continue
+      msg = helpers[ninfo.uuid].fail_msg
+      if msg:
+        raise errors.OpPrereqError("Error checking drbd helper on node"
+                                   " '%s': %s" % (ninfo.name, msg),
+                                   errors.ECODE_ENVIRON)
+      node_helper = helpers[ninfo.uuid].payload
+      if node_helper != drbd_helper:
+        raise errors.OpPrereqError("Error on node '%s': drbd helper is %s" %
+                                   (ninfo.name, node_helper),
+                                   errors.ECODE_ENVIRON)
+
+  def _CheckDrbdHelper(self, node_uuids, drbd_enabled, drbd_gets_enabled):
+    """Check the DRBD usermode helper.
+
+    @type node_uuids: list of strings
+    @param node_uuids: a list of nodes' UUIDs
+    @type drbd_enabled: boolean
+    @param drbd_enabled: whether DRBD will be enabled after this operation
+      (no matter if it was disabled before or not)
+    @type drbd_gets_enabled: boolean
+    @param drbd_gets_enabled: true if DRBD was disabled before this
+      operation, but will be enabled afterwards
+
+    """
+    if self.op.drbd_helper == '':
+      if drbd_enabled:
+        raise errors.OpPrereqError("Cannot disable drbd helper while"
+                                   " DRBD is enabled.", errors.ECODE_STATE)
+      if self.cfg.DisksOfType(constants.DT_DRBD8):
+        raise errors.OpPrereqError("Cannot disable drbd helper while"
+                                   " drbd-based instances exist",
+                                   errors.ECODE_INVAL)
+
+    else:
+      if self.op.drbd_helper is not None and drbd_enabled:
+        self._CheckDrbdHelperOnNodes(self.op.drbd_helper, node_uuids)
+      else:
+        if drbd_gets_enabled:
+          current_drbd_helper = self.cfg.GetClusterInfo().drbd_usermode_helper
+          if current_drbd_helper is not None:
+            self._CheckDrbdHelperOnNodes(current_drbd_helper, node_uuids)
+          else:
+            raise errors.OpPrereqError("Cannot enable DRBD without a"
+                                       " DRBD usermode helper set.",
+                                       errors.ECODE_STATE)
+
+  def _CheckInstancesOfDisabledDiskTemplates(
+      self, disabled_disk_templates):
+    """Check whether we try to disable a disk template that is in use.
+
+    @type disabled_disk_templates: list of string
+    @param disabled_disk_templates: list of disk templates that are going to
+      be disabled by this operation
+
+    """
+    for disk_template in disabled_disk_templates:
+      disks_with_type = self.cfg.DisksOfType(disk_template)
+      if disks_with_type:
+        disk_desc = []
+        for disk in disks_with_type:
+          instance_uuid = self.cfg.GetInstanceForDisk(disk.uuid)
+          instance = self.cfg.GetInstanceInfo(instance_uuid)
+          if instance:
+            instance_desc = "on " + instance.name
+          else:
+            instance_desc = "detached"
+          disk_desc.append("%s (%s)" % (disk, instance_desc))
+        raise errors.OpPrereqError(
+            "Cannot disable disk template '%s', because there is at least one"
+            " disk using it:\n * %s" % (disk_template, "\n * ".join(disk_desc)),
+            errors.ECODE_STATE)
+    if constants.DT_DISKLESS in disabled_disk_templates:
+      instances = self.cfg.GetAllInstancesInfo()
+      for inst in instances.values():
+        if not inst.disks:
+          raise errors.OpPrereqError(
+              "Cannot disable disk template 'diskless', because there is at"
+              " least one instance using it:\n * %s" % inst.name,
+              errors.ECODE_STATE)
+
+  @staticmethod
+  def _CheckInstanceCommunicationNetwork(network, warning_fn):
+    """Check whether an existing network is configured for instance
+    communication.
+
+    Checks whether an existing network is configured with the
+    parameters that are advisable for instance communication, and
+    otherwise issues security warnings.
+
+    @type network: L{ganeti.objects.Network}
+    @param network: L{ganeti.objects.Network} object whose
+                    configuration is being checked
+    @type warning_fn: function
+    @param warning_fn: function used to print warnings
+    @rtype: None
+    @return: None
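+
+    For illustration, a network that sets an IPv4 gateway while keeping
+    all other defaults triggers exactly one warning of the form
+    "Supplied instance communication network '...' has an IPv4 gateway
+    '...', this might pose a security risk (default is 'None')."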
+
+    """
+    def _MaybeWarn(err, val, default):
+      if val != default:
+        warning_fn("Supplied instance communication network '%s' %s '%s',"
+                   " this might pose a security risk (default is '%s').",
+                   network.name, err, val, default)
+
+    if network.network is None:
+      raise errors.OpPrereqError("Supplied instance communication network '%s'"
+                                 " must have an IPv4 network address.",
+                                 network.name)
+
+    _MaybeWarn("has an IPv4 gateway", network.gateway, None)
+    _MaybeWarn("has a non-standard IPv4 network address", network.network,
+               constants.INSTANCE_COMMUNICATION_NETWORK4)
+    _MaybeWarn("has an IPv6 gateway", network.gateway6, None)
+    _MaybeWarn("has a non-standard IPv6 network address", network.network6,
+               constants.INSTANCE_COMMUNICATION_NETWORK6)
+    _MaybeWarn("has a non-standard MAC prefix", network.mac_prefix,
+               constants.INSTANCE_COMMUNICATION_MAC_PREFIX)
+
+  def CheckPrereq(self):
+    """Check prerequisites.
+
+    This checks that the given parameters do not conflict and that the
+    given volume group is valid.
+
+    """
+    node_uuids = self.owned_locks(locking.LEVEL_NODE)
+    self.cluster = cluster = self.cfg.GetClusterInfo()
+
+    vm_capable_node_uuids = [node.uuid
+                             for node in self.cfg.GetAllNodesInfo().values()
+                             if node.uuid in node_uuids and node.vm_capable]
+
+    (enabled_disk_templates, new_enabled_disk_templates,
+      disabled_disk_templates) = self._GetDiskTemplateSets(cluster)
+    self._CheckInstancesOfDisabledDiskTemplates(disabled_disk_templates)
+
+    self._CheckVgName(vm_capable_node_uuids, enabled_disk_templates,
+                      new_enabled_disk_templates)
+
+    if self.op.file_storage_dir is not None:
+      CheckFileStoragePathVsEnabledDiskTemplates(
+          self.LogWarning, self.op.file_storage_dir, enabled_disk_templates)
+
+    if self.op.shared_file_storage_dir is not None:
+      CheckSharedFileStoragePathVsEnabledDiskTemplates(
+          self.LogWarning, self.op.shared_file_storage_dir,
+          enabled_disk_templates)
+
+    drbd_enabled = constants.DT_DRBD8 in enabled_disk_templates
+    drbd_gets_enabled = constants.DT_DRBD8 in new_enabled_disk_templates
+    self._CheckDrbdHelper(vm_capable_node_uuids,
+                          drbd_enabled, drbd_gets_enabled)
+
+    # validate params changes
+    if self.op.beparams:
+      objects.UpgradeBeParams(self.op.beparams)
+      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
+      self.new_beparams = cluster.SimpleFillBE(self.op.beparams)
+
+    if self.op.ndparams:
+      utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
+      self.new_ndparams = cluster.SimpleFillND(self.op.ndparams)
+
+      # TODO: we need a more general way to handle resetting
+      # cluster-level parameters to default values
+      if self.new_ndparams["oob_program"] == "":
+        self.new_ndparams["oob_program"] = \
+            constants.NDC_DEFAULTS[constants.ND_OOB_PROGRAM]
+
+    if self.op.hv_state:
+      new_hv_state = MergeAndVerifyHvState(self.op.hv_state,
+                                           self.cluster.hv_state_static)
+      self.new_hv_state = dict((hv, cluster.SimpleFillHvState(values))
+                               for hv, values in new_hv_state.items())
+
+    if self.op.disk_state:
+      new_disk_state = MergeAndVerifyDiskState(self.op.disk_state,
+                                               self.cluster.disk_state_static)
+      self.new_disk_state = \
+        dict((storage, dict((name, cluster.SimpleFillDiskState(values))
+                            for name, values in svalues.items()))
+             for storage, svalues in new_disk_state.items())
+
+    self._CheckIpolicy(cluster, enabled_disk_templates)
+
+    if self.op.nicparams:
+      utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
+      self.new_nicparams = cluster.SimpleFillNIC(self.op.nicparams)
+      objects.NIC.CheckParameterSyntax(self.new_nicparams)
+      nic_errors = []
+
+      # check all instances for consistency
+      for instance in self.cfg.GetAllInstancesInfo().values():
+        for nic_idx, nic in enumerate(instance.nics):
+          params_copy = copy.deepcopy(nic.nicparams)
+          params_filled = objects.FillDict(self.new_nicparams, params_copy)
+
+          # check parameter syntax
+          try:
+            objects.NIC.CheckParameterSyntax(params_filled)
+          except errors.ConfigurationError, err:
+            nic_errors.append("Instance %s, nic/%d: %s" %
+                              (instance.name, nic_idx, err))
+
+          # if we're moving instances to routed, check that they have an ip
+          target_mode = params_filled[constants.NIC_MODE]
+          if target_mode == constants.NIC_MODE_ROUTED and not nic.ip:
+            nic_errors.append("Instance %s, nic/%d: routed NIC with no ip"
+                              " address" % (instance.name, nic_idx))
+      if nic_errors:
+        raise errors.OpPrereqError("Cannot apply the change, errors:\n%s" %
+                                   "\n".join(nic_errors), errors.ECODE_INVAL)
+
+    # hypervisor list/parameters
+    self.new_hvparams = new_hvp = objects.FillDict(cluster.hvparams, {})
+    if self.op.hvparams:
+      for hv_name, hv_dict in self.op.hvparams.items():
+        if hv_name not in self.new_hvparams:
+          self.new_hvparams[hv_name] = hv_dict
+        else:
+          self.new_hvparams[hv_name].update(hv_dict)
+
+    # disk template parameters
+    self.new_diskparams = objects.FillDict(cluster.diskparams, {})
+    if self.op.diskparams:
+      for dt_name, dt_params in self.op.diskparams.items():
+        if dt_name not in self.new_diskparams:
+          self.new_diskparams[dt_name] = dt_params
+        else:
+          self.new_diskparams[dt_name].update(dt_params)
+      CheckDiskAccessModeConsistency(self.op.diskparams, self.cfg)
+
+    # os hypervisor parameters
+    self.new_os_hvp = objects.FillDict(cluster.os_hvp, {})
+    if self.op.os_hvp:
+      for os_name, hvs in self.op.os_hvp.items():
+        if os_name not in self.new_os_hvp:
+          self.new_os_hvp[os_name] = hvs
+        else:
+          for hv_name, hv_dict in hvs.items():
+            if hv_dict is None:
+              # Delete if it exists
+              self.new_os_hvp[os_name].pop(hv_name, None)
+            elif hv_name not in self.new_os_hvp[os_name]:
+              self.new_os_hvp[os_name][hv_name] = hv_dict
+            else:
+              self.new_os_hvp[os_name][hv_name].update(hv_dict)
+
+    # os parameters
+    self._BuildOSParams(cluster)
+
+    # changes to the hypervisor list
+    if self.op.enabled_hypervisors is not None:
+      for hv in self.op.enabled_hypervisors:
+        # if the hypervisor doesn't already exist in the cluster
+        # hvparams, we initialize it to empty, and then (in both
+        # cases) we make sure to fill the defaults, as we might not
+        # have a complete defaults list if the hypervisor wasn't
+        # enabled before
+        if hv not in new_hvp:
+          new_hvp[hv] = {}
+        new_hvp[hv] = objects.FillDict(constants.HVC_DEFAULTS[hv], new_hvp[hv])
+        utils.ForceDictType(new_hvp[hv], constants.HVS_PARAMETER_TYPES)
+
+    if self.op.hvparams or self.op.enabled_hypervisors is not None:
+      # either the enabled list has changed, or the parameters have, validate
+      for hv_name, hv_params in self.new_hvparams.items():
+        if ((self.op.hvparams and hv_name in self.op.hvparams) or
+            (self.op.enabled_hypervisors and
+             hv_name in self.op.enabled_hypervisors)):
+          # either this is a new hypervisor, or its parameters have changed
+          hv_class = hypervisor.GetHypervisorClass(hv_name)
+          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
+          hv_class.CheckParameterSyntax(hv_params)
+          CheckHVParams(self, node_uuids, hv_name, hv_params)
+
+    if self.op.os_hvp:
+      # no need to check any newly-enabled hypervisors, since the
+      # defaults have already been checked in the above code-block
+      for os_name, os_hvp in self.new_os_hvp.items():
+        for hv_name, hv_params in os_hvp.items():
+          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
+          # we need to fill in the new os_hvp on top of the actual hvparams
+          cluster_defaults = self.new_hvparams.get(hv_name, {})
+          new_osp = objects.FillDict(cluster_defaults, hv_params)
+          hv_class = hypervisor.GetHypervisorClass(hv_name)
+          hv_class.CheckParameterSyntax(new_osp)
+          CheckHVParams(self, node_uuids, hv_name, new_osp)
+
+    if self.op.default_iallocator:
+      alloc_script = utils.FindFile(self.op.default_iallocator,
+                                    constants.IALLOCATOR_SEARCH_PATH,
+                                    os.path.isfile)
+      if alloc_script is None:
+        raise errors.OpPrereqError("Invalid default iallocator script '%s'"
+                                   " specified" % self.op.default_iallocator,
+                                   errors.ECODE_INVAL)
+
+    if self.op.instance_communication_network:
+      network_name = self.op.instance_communication_network
+
+      try:
+        network_uuid = self.cfg.LookupNetwork(network_name)
+      except errors.OpPrereqError:
+        network_uuid = None
+
+      if network_uuid is not None:
+        network = self.cfg.GetNetwork(network_uuid)
+        self._CheckInstanceCommunicationNetwork(network, self.LogWarning)
+
+    if self.op.compression_tools:
+      CheckCompressionTools(self.op.compression_tools)
+
+  def _BuildOSParams(self, cluster):
+    "Calculate the new OS parameters for this operation."
+
+    def _GetNewParams(source, new_params):
+      "Wrapper around GetUpdatedParams."
+      if new_params is None:
+        return source
+      result = objects.FillDict(source, {}) # deep copy of source
+      for os_name in new_params:
+        result[os_name] = GetUpdatedParams(result.get(os_name, {}),
+                                           new_params[os_name],
+                                           use_none=True)
+        if not result[os_name]:
+          del result[os_name] # we removed all parameters
+      return result
+
+    self.new_osp = _GetNewParams(cluster.osparams,
+                                 self.op.osparams)
+    self.new_osp_private = _GetNewParams(cluster.osparams_private_cluster,
+                                         self.op.osparams_private_cluster)
+
+    # collect every OS whose parameters are changed by this operation
+    changed_oses = (set(self.new_osp.keys()) | set(self.new_osp_private.keys()))
+    for os_name in changed_oses:
+      os_params = cluster.SimpleFillOS(
+        os_name,
+        self.new_osp.get(os_name, {}),
+        os_params_private=self.new_osp_private.get(os_name, {})
+      )
+      # check the parameter validity (remote check)
+      CheckOSParams(self, False, [self.cfg.GetMasterNode()],
+                    os_name, os_params, False)
+
+  def _SetVgName(self, feedback_fn):
+    """Determines and sets the new volume group name.
+
+    """
+    if self.op.vg_name is not None:
+      new_volume = self.op.vg_name
+      if not new_volume:
+        new_volume = None
+      if new_volume != self.cfg.GetVGName():
+        self.cfg.SetVGName(new_volume)
+      else:
+        feedback_fn("Cluster LVM configuration already in desired"
+                    " state, not changing")
+
+  def _SetFileStorageDir(self, feedback_fn):
+    """Set the file storage directory.
+
+    """
+    if self.op.file_storage_dir is not None:
+      if self.cluster.file_storage_dir == self.op.file_storage_dir:
+        feedback_fn("Global file storage dir already set to value '%s'"
+                    % self.cluster.file_storage_dir)
+      else:
+        self.cluster.file_storage_dir = self.op.file_storage_dir
+
+  def _SetSharedFileStorageDir(self, feedback_fn):
+    """Set the shared file storage directory.
+
+    """
+    if self.op.shared_file_storage_dir is not None:
+      if self.cluster.shared_file_storage_dir == \
+          self.op.shared_file_storage_dir:
+        feedback_fn("Global shared file storage dir already set to value '%s'"
+                    % self.cluster.shared_file_storage_dir)
+      else:
+        self.cluster.shared_file_storage_dir = self.op.shared_file_storage_dir
+
+  def _SetDrbdHelper(self, feedback_fn):
+    """Set the DRBD usermode helper.
+
+    """
+    if self.op.drbd_helper is not None:
+      if constants.DT_DRBD8 not in self.cluster.enabled_disk_templates:
+        feedback_fn("Note that you specified a drbd user helper, but did not"
+                    " enable the drbd disk template.")
+      new_helper = self.op.drbd_helper
+      if not new_helper:
+        new_helper = None
+      if new_helper != self.cfg.GetDRBDHelper():
+        self.cfg.SetDRBDHelper(new_helper)
+      else:
+        feedback_fn("Cluster DRBD helper already in desired state,"
+                    " not changing")
+
+  @staticmethod
+  def _EnsureInstanceCommunicationNetwork(cfg, network_name):
+    """Ensure that the instance communication network exists and is
+    connected to all groups.
+
+    The instance communication network given by L{network_name} is
+    created, if necessary, via the opcode 'OpNetworkAdd'.  Also, the
+    instance communication network is connected to all existing node
+    groups, if necessary, via the opcode 'OpNetworkConnect'.
+
+    @type cfg: L{config.ConfigWriter}
+    @param cfg: cluster configuration
+
+    @type network_name: string
+    @param network_name: instance communication network name
+
+    @rtype: L{ganeti.cmdlib.ResultWithJobs} or L{None}
+    @return: L{ganeti.cmdlib.ResultWithJobs} if the instance
+             communication network needs to be created or connected
+             to a group, otherwise L{None}
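+
+    Example (illustrative): on a cluster with two node groups and no
+    such network yet, a single job containing one C{OpNetworkAdd} and
+    two C{OpNetworkConnect} opcodes is submitted.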
+
+    """
+    jobs = []
+
+    try:
+      network_uuid = cfg.LookupNetwork(network_name)
+      network_exists = True
+    except errors.OpPrereqError:
+      network_exists = False
+
+    if not network_exists:
+      jobs.append(AddInstanceCommunicationNetworkOp(network_name))
+
+    for group_uuid in cfg.GetNodeGroupList():
+      group = cfg.GetNodeGroup(group_uuid)
+
+      if network_exists:
+        network_connected = network_uuid in group.networks
+      else:
+        # The network was created asynchronously by the previous
+        # opcode and, therefore, we don't have access to its
+        # network_uuid.  As a result, we assume that the network is
+        # not connected to any group yet.
+        network_connected = False
+
+      if not network_connected:
+        op = ConnectInstanceCommunicationNetworkOp(group_uuid, network_name)
+        jobs.append(op)
+
+    if jobs:
+      return ResultWithJobs([jobs])
+    else:
+      return None
+
+  @staticmethod
+  def _ModifyInstanceCommunicationNetwork(cfg, network_name, feedback_fn):
+    """Update the instance communication network stored in the cluster
+    configuration.
+
+    Compares the user-supplied instance communication network against
+    the one stored in the Ganeti cluster configuration.  If there is a
+    change, the instance communication network may be possibly created
+    and connected to all groups (see
+    L{LUClusterSetParams._EnsureInstanceCommunicationNetwork}).
+
+    @type cfg: L{config.ConfigWriter}
+    @param cfg: cluster configuration
+
+    @type network_name: string
+    @param network_name: instance communication network name
+
+    @type feedback_fn: function
+    @param feedback_fn: see L{ganeti.cmdlib.base.LogicalUnit}
+
+    @rtype: L{LUClusterSetParams._EnsureInstanceCommunicationNetwork} or L{None}
+    @return: see L{LUClusterSetParams._EnsureInstanceCommunicationNetwork}
+
+    """
+    config_network_name = cfg.GetInstanceCommunicationNetwork()
+
+    if network_name == config_network_name:
+      feedback_fn("Instance communication network already is '%s', nothing to"
+                  " do." % network_name)
+    else:
+      try:
+        cfg.LookupNetwork(config_network_name)
+        feedback_fn("Previous instance communication network '%s'"
+                    " should be removed manually." % config_network_name)
+      except errors.OpPrereqError:
+        pass
+
+      if network_name:
+        feedback_fn("Changing instance communication network to '%s', only new"
+                    " instances will be affected."
+                    % network_name)
+      else:
+        feedback_fn("Disabling instance communication network, only new"
+                    " instances will be affected.")
+
+      cfg.SetInstanceCommunicationNetwork(network_name)
+
+      if network_name:
+        return LUClusterSetParams._EnsureInstanceCommunicationNetwork(
+          cfg,
+          network_name)
+      else:
+        return None
+
+  def Exec(self, feedback_fn):
+    """Change the parameters of the cluster.
+
+    """
+    # re-read the fresh configuration
+    self.cluster = self.cfg.GetClusterInfo()
+    if self.op.enabled_disk_templates:
+      self.cluster.enabled_disk_templates = \
+        list(self.op.enabled_disk_templates)
+    # save the changes
+    self.cfg.Update(self.cluster, feedback_fn)
+
+    self._SetVgName(feedback_fn)
+
+    self.cluster = self.cfg.GetClusterInfo()
+    self._SetFileStorageDir(feedback_fn)
+    self._SetSharedFileStorageDir(feedback_fn)
+    self.cfg.Update(self.cluster, feedback_fn)
+    self._SetDrbdHelper(feedback_fn)
+
+    # re-read the fresh configuration again
+    self.cluster = self.cfg.GetClusterInfo()
+
+    ensure_kvmd = False
+
+    active = constants.DATA_COLLECTOR_STATE_ACTIVE
+    if self.op.enabled_data_collectors is not None:
+      for name, val in self.op.enabled_data_collectors.items():
+        self.cluster.data_collectors[name][active] = val
+
+    if self.op.data_collector_interval:
+      internal = constants.DATA_COLLECTOR_PARAMETER_INTERVAL
+      for name, val in self.op.data_collector_interval.items():
+        self.cluster.data_collectors[name][internal] = int(val)
+
+    if self.op.hvparams:
+      self.cluster.hvparams = self.new_hvparams
+    if self.op.os_hvp:
+      self.cluster.os_hvp = self.new_os_hvp
+    if self.op.enabled_hypervisors is not None:
+      self.cluster.hvparams = self.new_hvparams
+      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
+      ensure_kvmd = True
+    if self.op.beparams:
+      self.cluster.beparams[constants.PP_DEFAULT] = self.new_beparams
+    if self.op.nicparams:
+      self.cluster.nicparams[constants.PP_DEFAULT] = self.new_nicparams
+    if self.op.ipolicy:
+      self.cluster.ipolicy = self.new_ipolicy
+    if self.op.osparams:
+      self.cluster.osparams = self.new_osp
+    if self.op.osparams_private_cluster:
+      self.cluster.osparams_private_cluster = self.new_osp_private
+    if self.op.ndparams:
+      self.cluster.ndparams = self.new_ndparams
+    if self.op.diskparams:
+      self.cluster.diskparams = self.new_diskparams
+    if self.op.hv_state:
+      self.cluster.hv_state_static = self.new_hv_state
+    if self.op.disk_state:
+      self.cluster.disk_state_static = self.new_disk_state
+
+    if self.op.candidate_pool_size is not None:
+      self.cluster.candidate_pool_size = self.op.candidate_pool_size
+      # we need to update the pool size here, otherwise the save will fail
+      AdjustCandidatePool(self, [])
+
+    if self.op.max_running_jobs is not None:
+      self.cluster.max_running_jobs = self.op.max_running_jobs
+
+    if self.op.max_tracked_jobs is not None:
+      self.cluster.max_tracked_jobs = self.op.max_tracked_jobs
+
+    if self.op.maintain_node_health is not None:
+      self.cluster.maintain_node_health = self.op.maintain_node_health
+
+    if self.op.modify_etc_hosts is not None:
+      self.cluster.modify_etc_hosts = self.op.modify_etc_hosts
+
+    if self.op.prealloc_wipe_disks is not None:
+      self.cluster.prealloc_wipe_disks = self.op.prealloc_wipe_disks
+
+    if self.op.add_uids is not None:
+      uidpool.AddToUidPool(self.cluster.uid_pool, self.op.add_uids)
+
+    if self.op.remove_uids is not None:
+      uidpool.RemoveFromUidPool(self.cluster.uid_pool, self.op.remove_uids)
+
+    if self.op.uid_pool is not None:
+      self.cluster.uid_pool = self.op.uid_pool
+
+    if self.op.default_iallocator is not None:
+      self.cluster.default_iallocator = self.op.default_iallocator
+
+    if self.op.default_iallocator_params is not None:
+      self.cluster.default_iallocator_params = self.op.default_iallocator_params
+
+    if self.op.reserved_lvs is not None:
+      self.cluster.reserved_lvs = self.op.reserved_lvs
+
+    if self.op.use_external_mip_script is not None:
+      self.cluster.use_external_mip_script = self.op.use_external_mip_script
+
+    if self.op.enabled_user_shutdown is not None and \
+          self.cluster.enabled_user_shutdown != self.op.enabled_user_shutdown:
+      self.cluster.enabled_user_shutdown = self.op.enabled_user_shutdown
+      ensure_kvmd = True
+
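+    # helper_os applies (DDM_ADD|DDM_REMOVE, os_name) pairs to one of
+    # the cluster's OS lists; e.g. [(constants.DDM_ADD, "debian")]
+    # appends "debian" to the list unless it is already present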
+    def helper_os(aname, mods, desc):
+      desc += " OS list"
+      lst = getattr(self.cluster, aname)
+      for key, val in mods:
+        if key == constants.DDM_ADD:
+          if val in lst:
+            feedback_fn("OS %s already in %s, ignoring" % (val, desc))
+          else:
+            lst.append(val)
+        elif key == constants.DDM_REMOVE:
+          if val in lst:
+            lst.remove(val)
+          else:
+            feedback_fn("OS %s not found in %s, ignoring" % (val, desc))
+        else:
+          raise errors.ProgrammerError("Invalid modification '%s'" % key)
+
+    if self.op.hidden_os:
+      helper_os("hidden_os", self.op.hidden_os, "hidden")
+
+    if self.op.blacklisted_os:
+      helper_os("blacklisted_os", self.op.blacklisted_os, "blacklisted")
+
+    if self.op.mac_prefix:
+      self.cluster.mac_prefix = self.op.mac_prefix
+
+    if self.op.master_netdev:
+      master_params = self.cfg.GetMasterNetworkParameters()
+      ems = self.cfg.GetUseExternalMipScript()
+      feedback_fn("Shutting down master ip on the current netdev (%s)" %
+                  self.cluster.master_netdev)
+      result = self.rpc.call_node_deactivate_master_ip(master_params.uuid,
+                                                       master_params, ems)
+      if not self.op.force:
+        result.Raise("Could not disable the master ip")
+      else:
+        if result.fail_msg:
+          msg = ("Could not disable the master ip (continuing anyway): %s" %
+                 result.fail_msg)
+          feedback_fn(msg)
+      feedback_fn("Changing master_netdev from %s to %s" %
+                  (master_params.netdev, self.op.master_netdev))
+      self.cluster.master_netdev = self.op.master_netdev
+
+    if self.op.master_netmask:
+      master_params = self.cfg.GetMasterNetworkParameters()
+      feedback_fn("Changing master IP netmask to %s" % self.op.master_netmask)
+      result = self.rpc.call_node_change_master_netmask(
+                 master_params.uuid, master_params.netmask,
+                 self.op.master_netmask, master_params.ip,
+                 master_params.netdev)
+      result.Warn("Could not change the master IP netmask", feedback_fn)
+      self.cluster.master_netmask = self.op.master_netmask
+
+    if self.op.install_image:
+      self.cluster.install_image = self.op.install_image
+
+    if self.op.zeroing_image is not None:
+      CheckImageValidity(self.op.zeroing_image,
+                         "Zeroing image must be an absolute path or a URL")
+      self.cluster.zeroing_image = self.op.zeroing_image
+
+    self.cfg.Update(self.cluster, feedback_fn)
+
+    if self.op.master_netdev:
+      master_params = self.cfg.GetMasterNetworkParameters()
+      feedback_fn("Starting the master ip on the new master netdev (%s)" %
+                  self.op.master_netdev)
+      ems = self.cfg.GetUseExternalMipScript()
+      result = self.rpc.call_node_activate_master_ip(master_params.uuid,
+                                                     master_params, ems)
+      result.Warn("Could not re-enable the master ip on the master,"
+                  " please restart manually", self.LogWarning)
+
+    # Even though 'self.op.enabled_user_shutdown' is being tested
+    # above, the RPCs can only be done after 'self.cfg.Update' because
+    # this will update the cluster object and sync 'Ssconf', and kvmd
+    # uses 'Ssconf'.
+    if ensure_kvmd:
+      EnsureKvmdOnNodes(self, feedback_fn)
+
+    if self.op.compression_tools is not None:
+      self.cfg.SetCompressionTools(self.op.compression_tools)
+
+    network_name = self.op.instance_communication_network
+    if network_name is not None:
+      return self._ModifyInstanceCommunicationNetwork(self.cfg,
+                                                      network_name, feedback_fn)
+    else:
+      return None
similarity index 55%
rename from lib/cmdlib/cluster.py
rename to lib/cmdlib/cluster/verify.py
index cc2fa01..dfa1294 100644 (file)
@@ -1,7 +1,7 @@
 #
 #
 
-# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc.
+# Copyright (C) 2014 Google Inc.
 # All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
 # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+"""Logical units for cluster verification."""
 
-"""Logical units dealing with the cluster."""
-
-import copy
-import itertools
-import logging
-import operator
-import os
-import re
-import time
-
-from ganeti import compat
-from ganeti import constants
-from ganeti import errors
-from ganeti import hypervisor
-from ganeti import locking
-from ganeti import masterd
-from ganeti import netutils
-from ganeti import objects
-from ganeti import opcodes
-from ganeti import pathutils
-from ganeti import query
-import ganeti.rpc.node as rpc
-from ganeti import runtime
-from ganeti import ssh
-from ganeti import uidpool
-from ganeti import utils
-from ganeti import vcluster
-
-from ganeti.cmdlib.base import NoHooksLU, QueryBase, LogicalUnit, \
-  ResultWithJobs
-from ganeti.cmdlib.common import ShareAll, RunPostHook, \
-  ComputeAncillaryFiles, RedistributeAncillaryFiles, UploadHelper, \
-  GetWantedInstances, MergeAndVerifyHvState, MergeAndVerifyDiskState, \
-  GetUpdatedIPolicy, ComputeNewInstanceViolations, GetUpdatedParams, \
-  CheckOSParams, CheckHVParams, AdjustCandidatePool, CheckNodePVs, \
-  ComputeIPolicyInstanceViolation, AnnotateDiskParams, SupportsOob, \
-  CheckIpolicyVsDiskTemplates, CheckDiskAccessModeValidity, \
-  CheckDiskAccessModeConsistency, GetClientCertDigest, \
-  AddInstanceCommunicationNetworkOp, ConnectInstanceCommunicationNetworkOp, \
-  CheckImageValidity, CheckDiskAccessModeConsistency, EnsureKvmdOnNodes, \
-  WarnAboutFailedSshUpdates
-
-import ganeti.masterd.instance
-
-
-class LUClusterRenewCrypto(NoHooksLU):
-  """Renew the cluster's crypto tokens.
-
-  """
-
-  _MAX_NUM_RETRIES = 3
-  REQ_BGL = False
-
-  def ExpandNames(self):
-    self.needed_locks = {
-      locking.LEVEL_NODE: locking.ALL_SET,
-      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
-    }
-    self.share_locks = ShareAll()
-    self.share_locks[locking.LEVEL_NODE] = 0
-    self.share_locks[locking.LEVEL_NODE_ALLOC] = 0
-
-  def CheckPrereq(self):
-    """Check prerequisites.
-
-    This only determines whether the requested SSH key renewal has to be
-    suppressed because the cluster is configured not to modify the SSH
-    setup.
-
-    """
-    self._ssh_renewal_suppressed = \
-      not self.cfg.GetClusterInfo().modify_ssh_setup and self.op.ssh_keys
-
-  def _RenewNodeSslCertificates(self, feedback_fn):
-    """Renews the nodes' SSL certificates.
-
-    Note that most of this operation is done in gnt_cluster.py; this LU only
-    takes care of the renewal of the client SSL certificates.
-
-    """
-    master_uuid = self.cfg.GetMasterNode()
-    cluster = self.cfg.GetClusterInfo()
-
-    logging.debug("Renewing the master's SSL node certificate."
-                  " Master's UUID: %s.", master_uuid)
-
-    # mapping node UUIDs to client certificate digests
-    digest_map = {}
-    master_digest = utils.GetCertificateDigest(
-        cert_filename=pathutils.NODED_CLIENT_CERT_FILE)
-    digest_map[master_uuid] = master_digest
-    logging.debug("Adding the master's SSL node certificate digest to the"
-                  " configuration. Master's UUID: %s, Digest: %s",
-                  master_uuid, master_digest)
-
-    node_errors = {}
-    nodes = self.cfg.GetAllNodesInfo()
-    logging.debug("Renewing non-master nodes' node certificates.")
-    for (node_uuid, node_info) in nodes.items():
-      if node_info.offline:
-        feedback_fn("* Skipping offline node %s" % node_info.name)
-        logging.debug("Skipping offline node %s (UUID: %s).",
-                      node_info.name, node_uuid)
-        continue
-      if node_uuid != master_uuid:
-        logging.debug("Adding certificate digest of node '%s'.", node_uuid)
-        last_exception = None
-        for i in range(self._MAX_NUM_RETRIES):
-          try:
-            if node_info.master_candidate:
-              node_digest = GetClientCertDigest(self, node_uuid)
-              digest_map[node_uuid] = node_digest
-              logging.debug("Added the node's certificate to candidate"
-                            " certificate list. Current list: %s.",
-                            str(cluster.candidate_certs))
-            break
-          except errors.OpExecError as e:
-            last_exception = e
-            logging.error("Could not fetch a non-master node's SSL node"
-                          " certificate at attempt no. %s. The node's UUID"
-                          " is %s, and the error was: %s.",
-                          str(i + 1), node_uuid, e)
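-        # the 'else' of this 'for' loop runs only if the loop completed
-        # without 'break', i.e. if all retry attempts failed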
-        else:
-          if last_exception:
-            node_errors[node_uuid] = last_exception
-
-    if node_errors:
-      msg = ("Some nodes' SSL client certificates could not be fetched."
-             " Please make sure those nodes are reachable and rerun"
-             " the operation. The affected nodes and their errors are:\n")
-      for uuid, e in node_errors.items():
-        msg += "Node %s: %s\n" % (uuid, e)
-      feedback_fn(msg)
-
-    self.cfg.SetCandidateCerts(digest_map)
-
-  def _RenewSshKeys(self, feedback_fn):
-    """Renew all nodes' SSH keys.
-
-    """
-    master_uuid = self.cfg.GetMasterNode()
-
-    nodes = self.cfg.GetAllNodesInfo()
-    nodes_uuid_names = [(node_uuid, node_info.name) for (node_uuid, node_info)
-                        in nodes.items() if not node_info.offline]
-    node_names = [name for (_, name) in nodes_uuid_names]
-    node_uuids = [uuid for (uuid, _) in nodes_uuid_names]
-    potential_master_candidates = self.cfg.GetPotentialMasterCandidates()
-    master_candidate_uuids = self.cfg.GetMasterCandidateUuids()
-
-    result = self.rpc.call_node_ssh_keys_renew(
-      [master_uuid],
-      node_uuids, node_names,
-      master_candidate_uuids,
-      potential_master_candidates)
-
-    # Check if there were serious errors (for example master key files not
-    # writable).
-    result[master_uuid].Raise("Could not renew the SSH keys of all nodes")
-
-    # Process any non-disruptive errors (a few nodes unreachable etc.)
-    WarnAboutFailedSshUpdates(result, master_uuid, feedback_fn)
-
-  def Exec(self, feedback_fn):
-    if self.op.node_certificates:
-      feedback_fn("Renewing Node SSL certificates")
-      self._RenewNodeSslCertificates(feedback_fn)
-    if self.op.ssh_keys and not self._ssh_renewal_suppressed:
-      feedback_fn("Renewing SSH keys")
-      self._RenewSshKeys(feedback_fn)
-    elif self._ssh_renewal_suppressed:
-      feedback_fn("Cannot renew SSH keys if the cluster is configured to not"
-                  " modify the SSH setup.")
-
-
-class LUClusterActivateMasterIp(NoHooksLU):
-  """Activate the master IP on the master node.
-
-  """
-  def Exec(self, feedback_fn):
-    """Activate the master IP.
-
-    """
-    master_params = self.cfg.GetMasterNetworkParameters()
-    ems = self.cfg.GetUseExternalMipScript()
-    result = self.rpc.call_node_activate_master_ip(master_params.uuid,
-                                                   master_params, ems)
-    result.Raise("Could not activate the master IP")
-
-
-class LUClusterDeactivateMasterIp(NoHooksLU):
-  """Deactivate the master IP on the master node.
-
-  """
-  def Exec(self, feedback_fn):
-    """Deactivate the master IP.
-
-    """
-    master_params = self.cfg.GetMasterNetworkParameters()
-    ems = self.cfg.GetUseExternalMipScript()
-    result = self.rpc.call_node_deactivate_master_ip(master_params.uuid,
-                                                     master_params, ems)
-    result.Raise("Could not deactivate the master IP")
-
-
-class LUClusterConfigQuery(NoHooksLU):
-  """Return configuration values.
-
-  """
-  REQ_BGL = False
-
-  def CheckArguments(self):
-    self.cq = ClusterQuery(None, self.op.output_fields, False)
-
-  def ExpandNames(self):
-    self.cq.ExpandNames(self)
-
-  def DeclareLocks(self, level):
-    self.cq.DeclareLocks(self, level)
-
-  def Exec(self, feedback_fn):
-    result = self.cq.OldStyleQuery(self)
-
-    assert len(result) == 1
-
-    return result[0]
-
-
-class LUClusterDestroy(LogicalUnit):
-  """Logical unit for destroying the cluster.
-
-  """
-  HPATH = "cluster-destroy"
-  HTYPE = constants.HTYPE_CLUSTER
-
-  # Read by the job queue to detect when the cluster is gone and job files will
-  # never be available.
-  # FIXME: This variable should be removed together with the Python job queue.
-  clusterHasBeenDestroyed = False
-
-  def BuildHooksEnv(self):
-    """Build hooks env.
-
-    """
-    return {
-      "OP_TARGET": self.cfg.GetClusterName(),
-      }
-
-  def BuildHooksNodes(self):
-    """Build hooks nodes.
-
-    """
-    return ([], [])
-
-  def CheckPrereq(self):
-    """Check prerequisites.
-
-    This checks whether the cluster is empty.
-
-    Any errors are signaled by raising errors.OpPrereqError.
-
-    """
-    master = self.cfg.GetMasterNode()
-
-    nodelist = self.cfg.GetNodeList()
-    if len(nodelist) != 1 or nodelist[0] != master:
-      raise errors.OpPrereqError("There are still %d node(s) in"
-                                 " this cluster." % (len(nodelist) - 1),
-                                 errors.ECODE_INVAL)
-    instancelist = self.cfg.GetInstanceList()
-    if instancelist:
-      raise errors.OpPrereqError("There are still %d instance(s) in"
-                                 " this cluster." % len(instancelist),
-                                 errors.ECODE_INVAL)
-
-  def Exec(self, feedback_fn):
-    """Destroys the cluster.
-
-    """
-    master_params = self.cfg.GetMasterNetworkParameters()
-
-    # Run post hooks on master node before it's removed
-    RunPostHook(self, self.cfg.GetNodeName(master_params.uuid))
-
-    ems = self.cfg.GetUseExternalMipScript()
-    result = self.rpc.call_node_deactivate_master_ip(master_params.uuid,
-                                                     master_params, ems)
-    result.Warn("Error disabling the master IP address", self.LogWarning)
-
-    self.wconfd.Client().PrepareClusterDestruction(self.wconfdcontext)
-
-    # signal to the job queue that the cluster is gone
-    LUClusterDestroy.clusterHasBeenDestroyed = True
-
-    return master_params.uuid
-
-
-class LUClusterPostInit(LogicalUnit):
-  """Logical unit for running hooks after cluster initialization.
-
-  """
-  HPATH = "cluster-init"
-  HTYPE = constants.HTYPE_CLUSTER
-
-  def CheckArguments(self):
-    self.master_uuid = self.cfg.GetMasterNode()
-    self.master_ndparams = self.cfg.GetNdParams(self.cfg.GetMasterNodeInfo())
-
-    # TODO: When Issue 584 is solved, and None is properly parsed when used
-    # as a default value, ndparams.get(.., None) can be changed to
-    # ndparams[..] to access the values directly
-
-    # OpenvSwitch: Warn user if link is missing
-    if (self.master_ndparams[constants.ND_OVS] and not
-        self.master_ndparams.get(constants.ND_OVS_LINK, None)):
-      self.LogInfo("No physical interface for OpenvSwitch was given."
-                   " OpenvSwitch will not have an outside connection. This"
-                   " might not be what you want.")
-
-  def BuildHooksEnv(self):
-    """Build hooks env.
-
-    """
-    return {
-      "OP_TARGET": self.cfg.GetClusterName(),
-      }
-
-  def BuildHooksNodes(self):
-    """Build hooks nodes.
-
-    """
-    return ([], [self.cfg.GetMasterNode()])
-
-  def Exec(self, feedback_fn):
-    """Create and configure Open vSwitch
-
-    """
-    if self.master_ndparams[constants.ND_OVS]:
-      result = self.rpc.call_node_configure_ovs(
-                 self.master_uuid,
-                 self.master_ndparams[constants.ND_OVS_NAME],
-                 self.master_ndparams.get(constants.ND_OVS_LINK, None))
-      result.Raise("Could not successully configure Open vSwitch")
-
-    return True
-
-
-class ClusterQuery(QueryBase):
-  FIELDS = query.CLUSTER_FIELDS
-
-  #: Do not sort (there is only one item)
-  SORT_FIELD = None
-
-  def ExpandNames(self, lu):
-    lu.needed_locks = {}
-
-    # The following variables interact with _QueryBase._GetNames
-    self.wanted = locking.ALL_SET
-    self.do_locking = self.use_locking
-
-    if self.do_locking:
-      raise errors.OpPrereqError("Can not use locking for cluster queries",
-                                 errors.ECODE_INVAL)
-
-  def DeclareLocks(self, lu, level):
-    pass
-
-  def _GetQueryData(self, lu):
-    """Computes the list of nodes and their attributes.
-
-    """
-    if query.CQ_CONFIG in self.requested_data:
-      cluster = lu.cfg.GetClusterInfo()
-      nodes = lu.cfg.GetAllNodesInfo()
-    else:
-      cluster = NotImplemented
-      nodes = NotImplemented
-
-    if query.CQ_QUEUE_DRAINED in self.requested_data:
-      drain_flag = os.path.exists(pathutils.JOB_QUEUE_DRAIN_FILE)
-    else:
-      drain_flag = NotImplemented
-
-    if query.CQ_WATCHER_PAUSE in self.requested_data:
-      master_node_uuid = lu.cfg.GetMasterNode()
-
-      result = lu.rpc.call_get_watcher_pause(master_node_uuid)
-      result.Raise("Can't retrieve watcher pause from master node '%s'" %
-                   lu.cfg.GetMasterNodeName())
-
-      watcher_pause = result.payload
-    else:
-      watcher_pause = NotImplemented
-
-    return query.ClusterQueryData(cluster, nodes, drain_flag, watcher_pause)
-
-
-class LUClusterQuery(NoHooksLU):
-  """Query cluster configuration.
-
-  """
-  REQ_BGL = False
-
-  def ExpandNames(self):
-    self.needed_locks = {}
-
-  def Exec(self, feedback_fn):
-    """Return cluster config.
-
-    """
-    cluster = self.cfg.GetClusterInfo()
-    os_hvp = {}
-
-    # Filter just for enabled hypervisors
-    for os_name, hv_dict in cluster.os_hvp.items():
-      os_hvp[os_name] = {}
-      for hv_name, hv_params in hv_dict.items():
-        if hv_name in cluster.enabled_hypervisors:
-          os_hvp[os_name][hv_name] = hv_params
-
-    # Convert ip_family to ip_version
-    primary_ip_version = constants.IP4_VERSION
-    if cluster.primary_ip_family == netutils.IP6Address.family:
-      primary_ip_version = constants.IP6_VERSION
-
-    result = {
-      "software_version": constants.RELEASE_VERSION,
-      "protocol_version": constants.PROTOCOL_VERSION,
-      "config_version": constants.CONFIG_VERSION,
-      "os_api_version": max(constants.OS_API_VERSIONS),
-      "export_version": constants.EXPORT_VERSION,
-      "vcs_version": constants.VCS_VERSION,
-      "architecture": runtime.GetArchInfo(),
-      "name": cluster.cluster_name,
-      "master": self.cfg.GetMasterNodeName(),
-      "default_hypervisor": cluster.primary_hypervisor,
-      "enabled_hypervisors": cluster.enabled_hypervisors,
-      "hvparams": dict([(hypervisor_name, cluster.hvparams[hypervisor_name])
-                        for hypervisor_name in cluster.enabled_hypervisors]),
-      "os_hvp": os_hvp,
-      "beparams": cluster.beparams,
-      "osparams": cluster.osparams,
-      "ipolicy": cluster.ipolicy,
-      "nicparams": cluster.nicparams,
-      "ndparams": cluster.ndparams,
-      "diskparams": cluster.diskparams,
-      "candidate_pool_size": cluster.candidate_pool_size,
-      "max_running_jobs": cluster.max_running_jobs,
-      "max_tracked_jobs": cluster.max_tracked_jobs,
-      "mac_prefix": cluster.mac_prefix,
-      "master_netdev": cluster.master_netdev,
-      "master_netmask": cluster.master_netmask,
-      "use_external_mip_script": cluster.use_external_mip_script,
-      "volume_group_name": cluster.volume_group_name,
-      "drbd_usermode_helper": cluster.drbd_usermode_helper,
-      "file_storage_dir": cluster.file_storage_dir,
-      "shared_file_storage_dir": cluster.shared_file_storage_dir,
-      "maintain_node_health": cluster.maintain_node_health,
-      "ctime": cluster.ctime,
-      "mtime": cluster.mtime,
-      "uuid": cluster.uuid,
-      "tags": list(cluster.GetTags()),
-      "uid_pool": cluster.uid_pool,
-      "default_iallocator": cluster.default_iallocator,
-      "default_iallocator_params": cluster.default_iallocator_params,
-      "reserved_lvs": cluster.reserved_lvs,
-      "primary_ip_version": primary_ip_version,
-      "prealloc_wipe_disks": cluster.prealloc_wipe_disks,
-      "hidden_os": cluster.hidden_os,
-      "blacklisted_os": cluster.blacklisted_os,
-      "enabled_disk_templates": cluster.enabled_disk_templates,
-      "install_image": cluster.install_image,
-      "instance_communication_network": cluster.instance_communication_network,
-      "compression_tools": cluster.compression_tools,
-      "enabled_user_shutdown": cluster.enabled_user_shutdown,
-      }
-
-    return result
-
-
-class LUClusterRedistConf(NoHooksLU):
-  """Force the redistribution of cluster configuration.
-
-  This is a very simple LU.
-
-  """
-  REQ_BGL = False
-
-  def ExpandNames(self):
-    self.needed_locks = {
-      locking.LEVEL_NODE: locking.ALL_SET,
-      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
-    }
-    self.share_locks = ShareAll()
-
-  def Exec(self, feedback_fn):
-    """Redistribute the configuration.
-
-    """
-    self.cfg.Update(self.cfg.GetClusterInfo(), feedback_fn)
-    RedistributeAncillaryFiles(self)
-
-
-class LUClusterRename(LogicalUnit):
-  """Rename the cluster.
-
-  """
-  HPATH = "cluster-rename"
-  HTYPE = constants.HTYPE_CLUSTER
-
-  def BuildHooksEnv(self):
-    """Build hooks env.
-
-    """
-    return {
-      "OP_TARGET": self.cfg.GetClusterName(),
-      "NEW_NAME": self.op.name,
-      }
-
-  def BuildHooksNodes(self):
-    """Build hooks nodes.
-
-    """
-    return ([self.cfg.GetMasterNode()], self.cfg.GetNodeList())
-
-  def CheckPrereq(self):
-    """Verify that the passed name is a valid one.
-
-    """
-    hostname = netutils.GetHostname(name=self.op.name,
-                                    family=self.cfg.GetPrimaryIPFamily())
-
-    new_name = hostname.name
-    self.ip = new_ip = hostname.ip
-    old_name = self.cfg.GetClusterName()
-    old_ip = self.cfg.GetMasterIP()
-    if new_name == old_name and new_ip == old_ip:
-      raise errors.OpPrereqError("Neither the name nor the IP address of the"
-                                 " cluster has changed",
-                                 errors.ECODE_INVAL)
-    if new_ip != old_ip:
-      if netutils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
-        raise errors.OpPrereqError("The given cluster IP address (%s) is"
-                                   " reachable on the network" %
-                                   new_ip, errors.ECODE_NOTUNIQUE)
-
-    self.op.name = new_name
-
-  def Exec(self, feedback_fn):
-    """Rename the cluster.
-
-    """
-    clustername = self.op.name
-    new_ip = self.ip
-
-    # shutdown the master IP
-    master_params = self.cfg.GetMasterNetworkParameters()
-    ems = self.cfg.GetUseExternalMipScript()
-    result = self.rpc.call_node_deactivate_master_ip(master_params.uuid,
-                                                     master_params, ems)
-    result.Raise("Could not disable the master role")
-
-    try:
-      cluster = self.cfg.GetClusterInfo()
-      cluster.cluster_name = clustername
-      cluster.master_ip = new_ip
-      self.cfg.Update(cluster, feedback_fn)
-
-      # update the known hosts file
-      ssh.WriteKnownHostsFile(self.cfg, pathutils.SSH_KNOWN_HOSTS_FILE)
-      node_list = self.cfg.GetOnlineNodeList()
-      try:
-        node_list.remove(master_params.uuid)
-      except ValueError:
-        pass
-      UploadHelper(self, node_list, pathutils.SSH_KNOWN_HOSTS_FILE)
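-    # the master IP is re-activated in any case, even if updating the
-    # configuration failed, so that the cluster stays reachable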
-    finally:
-      master_params.ip = new_ip
-      result = self.rpc.call_node_activate_master_ip(master_params.uuid,
-                                                     master_params, ems)
-      result.Warn("Could not re-enable the master role on the master,"
-                  " please restart manually", self.LogWarning)
-
-    return clustername
-
-
-class LUClusterRepairDiskSizes(NoHooksLU):
-  """Verifies the cluster disks sizes.
-
-  """
-  REQ_BGL = False
-
-  def ExpandNames(self):
-    if self.op.instances:
-      (_, self.wanted_names) = GetWantedInstances(self, self.op.instances)
-      # Not getting the node allocation lock as only a specific set of
-      # instances (and their nodes) is going to be acquired
-      self.needed_locks = {
-        locking.LEVEL_NODE_RES: [],
-        locking.LEVEL_INSTANCE: self.wanted_names,
-        }
-      self.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE
-    else:
-      self.wanted_names = None
-      self.needed_locks = {
-        locking.LEVEL_NODE_RES: locking.ALL_SET,
-        locking.LEVEL_INSTANCE: locking.ALL_SET,
-
-        # This opcode acquires the node locks for all instances
-        locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
-        }
-
-    self.share_locks = {
-      locking.LEVEL_NODE_RES: 1,
-      locking.LEVEL_INSTANCE: 0,
-      locking.LEVEL_NODE_ALLOC: 1,
-      }
-
-  def DeclareLocks(self, level):
-    if level == locking.LEVEL_NODE_RES and self.wanted_names is not None:
-      self._LockInstancesNodes(primary_only=True, level=level)
-
-  def CheckPrereq(self):
-    """Check prerequisites.
-
-    This only checks the optional instance list against the existing names.
-
-    """
-    if self.wanted_names is None:
-      self.wanted_names = self.owned_locks(locking.LEVEL_INSTANCE)
-
-    self.wanted_instances = \
-        map(compat.snd, self.cfg.GetMultiInstanceInfoByName(self.wanted_names))
-
-  def _EnsureChildSizes(self, disk):
-    """Ensure children of the disk have the needed disk size.
-
-    This is valid mainly for DRBD8 and fixes an issue where the
-    children have smaller disk size.
-
-    @param disk: an L{ganeti.objects.Disk} object
-
-    """
-    if disk.dev_type == constants.DT_DRBD8:
-      assert disk.children, "Empty children for DRBD8?"
-      fchild = disk.children[0]
-      mismatch = fchild.size < disk.size
-      if mismatch:
-        self.LogInfo("Child disk has size %d, parent %d, fixing",
-                     fchild.size, disk.size)
-        fchild.size = disk.size
-
-      # and we recurse on this child only, not on the metadev
-      return self._EnsureChildSizes(fchild) or mismatch
-    else:
-      return False
-
-  def Exec(self, feedback_fn):
-    """Verify the size of cluster disks.
-
-    """
-    # TODO: check child disks too
-    # TODO: check differences in size between primary/secondary nodes
-    per_node_disks = {}
-    for instance in self.wanted_instances:
-      pnode = instance.primary_node
-      if pnode not in per_node_disks:
-        per_node_disks[pnode] = []
-      for idx, disk in enumerate(self.cfg.GetInstanceDisks(instance.uuid)):
-        per_node_disks[pnode].append((instance, idx, disk))
-
-    assert not (frozenset(per_node_disks.keys()) -
-                frozenset(self.owned_locks(locking.LEVEL_NODE_RES))), \
-      "Not owning correct locks"
-    assert not self.owned_locks(locking.LEVEL_NODE)
-
-    es_flags = rpc.GetExclusiveStorageForNodes(self.cfg,
-                                               per_node_disks.keys())
-
-    changed = []
-    for node_uuid, dskl in per_node_disks.items():
-      if not dskl:
-        # no disks on the node
-        continue
-
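-      # build a list of ([copied disk], instance) pairs, which is the
-      # input format expected by the blockdev_getdimensions RPC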
-      newl = [([v[2].Copy()], v[0]) for v in dskl]
-      node_name = self.cfg.GetNodeName(node_uuid)
-      result = self.rpc.call_blockdev_getdimensions(node_uuid, newl)
-      if result.fail_msg:
-        self.LogWarning("Failure in blockdev_getdimensions call to node"
-                        " %s, ignoring", node_name)
-        continue
-      if len(result.payload) != len(dskl):
-        logging.warning("Invalid result from node %s: len(dksl)=%d,"
-                        " result.payload=%s", node_name, len(dskl),
-                        result.payload)
-        self.LogWarning("Invalid result from node %s, ignoring node results",
-                        node_name)
-        continue
-      for ((instance, idx, disk), dimensions) in zip(dskl, result.payload):
-        if dimensions is None:
-          self.LogWarning("Disk %d of instance %s did not return size"
-                          " information, ignoring", idx, instance.name)
-          continue
-        if not isinstance(dimensions, (tuple, list)):
-          self.LogWarning("Disk %d of instance %s did not return valid"
-                          " dimension information, ignoring", idx,
-                          instance.name)
-          continue
-        (size, spindles) = dimensions
-        if not isinstance(size, (int, long)):
-          self.LogWarning("Disk %d of instance %s did not return valid"
-                          " size information, ignoring", idx, instance.name)
-          continue
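-        # the node reports the disk size in bytes, while disk.size is
-        # stored in MiB; convert before comparing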
-        size = size >> 20
-        if size != disk.size:
-          self.LogInfo("Disk %d of instance %s has mismatched size,"
-                       " correcting: recorded %d, actual %d", idx,
-                       instance.name, disk.size, size)
-          disk.size = size
-          self.cfg.Update(disk, feedback_fn)
-          changed.append((instance.name, idx, "size", size))
-        if es_flags[node_uuid]:
-          if spindles is None:
-            self.LogWarning("Disk %d of instance %s did not return valid"
-                            " spindles information, ignoring", idx,
-                            instance.name)
-          elif disk.spindles is None or disk.spindles != spindles:
-            self.LogInfo("Disk %d of instance %s has mismatched spindles,"
-                         " correcting: recorded %s, actual %s",
-                         idx, instance.name, disk.spindles, spindles)
-            disk.spindles = spindles
-            self.cfg.Update(disk, feedback_fn)
-            changed.append((instance.name, idx, "spindles", disk.spindles))
-        if self._EnsureChildSizes(disk):
-          self.cfg.Update(disk, feedback_fn)
-          changed.append((instance.name, idx, "size", disk.size))
-    return changed
-
-
-def _ValidateNetmask(cfg, netmask):
-  """Checks if a netmask is valid.
-
-  @type cfg: L{config.ConfigWriter}
-  @param cfg: cluster configuration
-  @type netmask: int
-  @param netmask: netmask to be verified
-  @raise errors.OpPrereqError: if the validation fails
-
-  """
-  ip_family = cfg.GetPrimaryIPFamily()
-  try:
-    ipcls = netutils.IPAddress.GetClassFromIpFamily(ip_family)
-  except errors.ProgrammerError:
-    raise errors.OpPrereqError("Invalid primary ip family: %s." %
-                               ip_family, errors.ECODE_INVAL)
-  if not ipcls.ValidateNetmask(netmask):
-    raise errors.OpPrereqError("CIDR netmask (%s) not valid" %
-                               (netmask), errors.ECODE_INVAL)
-
-
-def CheckFileBasedStoragePathVsEnabledDiskTemplates(
-    logging_warn_fn, file_storage_dir, enabled_disk_templates,
-    file_disk_template):
-  """Checks whether the given file-based storage directory is acceptable.
-
-  Note: This function is public, because it is also used in bootstrap.py.
-
-  @type logging_warn_fn: function
-  @param logging_warn_fn: function which accepts a string and logs it
-  @type file_storage_dir: string
-  @param file_storage_dir: the directory to be used for file-based instances
-  @type enabled_disk_templates: list of string
-  @param enabled_disk_templates: the list of enabled disk templates
-  @type file_disk_template: string
-  @param file_disk_template: the file-based disk template for which the
-      path should be checked
-
-  """
-  assert (file_disk_template in utils.storage.GetDiskTemplatesOfStorageTypes(
-            constants.ST_FILE, constants.ST_SHARED_FILE, constants.ST_GLUSTER
-         ))
-
-  file_storage_enabled = file_disk_template in enabled_disk_templates
-  if file_storage_dir is not None:
-    if file_storage_dir == "":
-      if file_storage_enabled:
-        raise errors.OpPrereqError(
-            "Unsetting the '%s' storage directory while having '%s' storage"
-            " enabled is not permitted." %
-            (file_disk_template, file_disk_template),
-            errors.ECODE_INVAL)
-    else:
-      if not file_storage_enabled:
-        logging_warn_fn(
-            "Specified a %s storage directory, although %s storage is not"
-            " enabled." % (file_disk_template, file_disk_template))
-  else:
-    raise errors.ProgrammerError("Received %s storage dir with value"
-                                 " 'None'." % file_disk_template)
-
-
-def CheckFileStoragePathVsEnabledDiskTemplates(
-    logging_warn_fn, file_storage_dir, enabled_disk_templates):
-  """Checks whether the given file storage directory is acceptable.
-
-  @see: C{CheckFileBasedStoragePathVsEnabledDiskTemplates}
-
-  """
-  CheckFileBasedStoragePathVsEnabledDiskTemplates(
-      logging_warn_fn, file_storage_dir, enabled_disk_templates,
-      constants.DT_FILE)
-
-
-def CheckSharedFileStoragePathVsEnabledDiskTemplates(
-    logging_warn_fn, file_storage_dir, enabled_disk_templates):
-  """Checks whether the given shared file storage directory is acceptable.
-
-  @see: C{CheckFileBasedStoragePathVsEnabledDiskTemplates}
-
-  """
-  CheckFileBasedStoragePathVsEnabledDiskTemplates(
-      logging_warn_fn, file_storage_dir, enabled_disk_templates,
-      constants.DT_SHARED_FILE)
-
-
-def CheckGlusterStoragePathVsEnabledDiskTemplates(
-    logging_warn_fn, file_storage_dir, enabled_disk_templates):
-  """Checks whether the given gluster storage directory is acceptable.
-
-  @see: C{CheckFileBasedStoragePathVsEnabledDiskTemplates}
-
-  """
-  CheckFileBasedStoragePathVsEnabledDiskTemplates(
-      logging_warn_fn, file_storage_dir, enabled_disk_templates,
-      constants.DT_GLUSTER)
-
-
-def CheckCompressionTools(tools):
-  """Check whether the provided compression tools look like executables.
-
-  @type tools: list of string
-  @param tools: The tools provided as opcode input
-
-  """
-  regex = re.compile('^[-_a-zA-Z0-9]+$')
-  illegal_tools = [t for t in tools if not regex.match(t)]
-
-  if illegal_tools:
-    raise errors.OpPrereqError(
-      "The tools '%s' contain illegal characters: only alphanumeric values,"
-      " dashes, and underscores are allowed" % ", ".join(illegal_tools),
-      errors.ECODE_INVAL
-    )
-
-  if constants.IEC_GZIP not in tools:
-    raise errors.OpPrereqError("For compatibility reasons, the %s utility must"
-                               " be present among the compression tools" %
-                               constants.IEC_GZIP, errors.ECODE_INVAL)
-
-  if constants.IEC_NONE in tools:
-    raise errors.OpPrereqError("%s is a reserved value used for no compression,"
-                               " and cannot be used as the name of a tool" %
-                               constants.IEC_NONE, errors.ECODE_INVAL)
-
-
-class LUClusterSetParams(LogicalUnit):
-  """Change the parameters of the cluster.
-
-  """
-  HPATH = "cluster-modify"
-  HTYPE = constants.HTYPE_CLUSTER
-  REQ_BGL = False
-
-  def CheckArguments(self):
-    """Check parameters
-
-    """
-    if self.op.uid_pool:
-      uidpool.CheckUidPool(self.op.uid_pool)
-
-    if self.op.add_uids:
-      uidpool.CheckUidPool(self.op.add_uids)
-
-    if self.op.remove_uids:
-      uidpool.CheckUidPool(self.op.remove_uids)
-
-    if self.op.mac_prefix:
-      self.op.mac_prefix = \
-          utils.NormalizeAndValidateThreeOctetMacPrefix(self.op.mac_prefix)
-
-    if self.op.master_netmask is not None:
-      _ValidateNetmask(self.cfg, self.op.master_netmask)
-
-    if self.op.diskparams:
-      for dt_params in self.op.diskparams.values():
-        utils.ForceDictType(dt_params, constants.DISK_DT_TYPES)
-      try:
-        utils.VerifyDictOptions(self.op.diskparams, constants.DISK_DT_DEFAULTS)
-        CheckDiskAccessModeValidity(self.op.diskparams)
-      except errors.OpPrereqError, err:
-        raise errors.OpPrereqError("While verify diskparams options: %s" % err,
-                                   errors.ECODE_INVAL)
-
-    if self.op.install_image is not None:
-      CheckImageValidity(self.op.install_image,
-                         "Install image must be an absolute path or a URL")
-
-  def ExpandNames(self):
-    # FIXME: in the future maybe other cluster params won't require checking on
-    # all nodes to be modified.
-    # FIXME: This opcode changes cluster-wide settings. Is acquiring all
-    # resource locks the right thing, shouldn't it be the BGL instead?
-    self.needed_locks = {
-      locking.LEVEL_NODE: locking.ALL_SET,
-      locking.LEVEL_INSTANCE: locking.ALL_SET,
-      locking.LEVEL_NODEGROUP: locking.ALL_SET,
-      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
-    }
-    self.share_locks = ShareAll()
-
-  def BuildHooksEnv(self):
-    """Build hooks env.
-
-    """
-    return {
-      "OP_TARGET": self.cfg.GetClusterName(),
-      "NEW_VG_NAME": self.op.vg_name,
-      }
-
-  def BuildHooksNodes(self):
-    """Build hooks nodes.
-
-    """
-    mn = self.cfg.GetMasterNode()
-    return ([mn], [mn])
-
-  def _CheckVgName(self, node_uuids, enabled_disk_templates,
-                   new_enabled_disk_templates):
-    """Check the consistency of the vg name on all nodes and in case it gets
-       unset whether there are instances still using it.
-
-    """
-    lvm_is_enabled = utils.IsLvmEnabled(enabled_disk_templates)
-    lvm_gets_enabled = utils.LvmGetsEnabled(enabled_disk_templates,
-                                            new_enabled_disk_templates)
-    current_vg_name = self.cfg.GetVGName()
-
-    if self.op.vg_name == '':
-      if lvm_is_enabled:
-        raise errors.OpPrereqError("Cannot unset volume group if lvm-based"
-                                   " disk templates are or get enabled.",
-                                   errors.ECODE_INVAL)
-
-    if self.op.vg_name is None:
-      if current_vg_name is None and lvm_is_enabled:
-        raise errors.OpPrereqError("Please specify a volume group when"
-                                   " enabling lvm-based disk-templates.",
-                                   errors.ECODE_INVAL)
-
-    if self.op.vg_name is not None and not self.op.vg_name:
-      if self.cfg.HasAnyDiskOfType(constants.DT_PLAIN):
-        raise errors.OpPrereqError("Cannot disable lvm storage while lvm-based"
-                                   " instances exist", errors.ECODE_INVAL)
-
-    if (self.op.vg_name is not None and lvm_is_enabled) or \
-        (self.cfg.GetVGName() is not None and lvm_gets_enabled):
-      self._CheckVgNameOnNodes(node_uuids)
-
-  def _CheckVgNameOnNodes(self, node_uuids):
-    """Check the status of the volume group on each node.
-
-    """
-    vglist = self.rpc.call_vg_list(node_uuids)
-    for node_uuid in node_uuids:
-      msg = vglist[node_uuid].fail_msg
-      if msg:
-        # ignoring down node
-        self.LogWarning("Error while gathering data on node %s"
-                        " (ignoring node): %s",
-                        self.cfg.GetNodeName(node_uuid), msg)
-        continue
-      vgstatus = utils.CheckVolumeGroupSize(vglist[node_uuid].payload,
-                                            self.op.vg_name,
-                                            constants.MIN_VG_SIZE)
-      if vgstatus:
-        raise errors.OpPrereqError("Error on node '%s': %s" %
-                                   (self.cfg.GetNodeName(node_uuid), vgstatus),
-                                   errors.ECODE_ENVIRON)
-
-  @staticmethod
-  def _GetDiskTemplateSetsInner(op_enabled_disk_templates,
-                                old_enabled_disk_templates):
-    """Computes three sets of disk templates.
-
-    @see: C{_GetDiskTemplateSets} for more details.
-
-    """
-    enabled_disk_templates = None
-    new_enabled_disk_templates = []
-    disabled_disk_templates = []
-    if op_enabled_disk_templates:
-      enabled_disk_templates = op_enabled_disk_templates
-      new_enabled_disk_templates = \
-        list(set(enabled_disk_templates)
-             - set(old_enabled_disk_templates))
-      disabled_disk_templates = \
-        list(set(old_enabled_disk_templates)
-             - set(enabled_disk_templates))
-    else:
-      enabled_disk_templates = old_enabled_disk_templates
-    return (enabled_disk_templates, new_enabled_disk_templates,
-            disabled_disk_templates)
-
-  def _GetDiskTemplateSets(self, cluster):
-    """Computes three sets of disk templates.
-
-    The three sets are:
-      - disk templates that will be enabled after this operation (no matter if
-        they were enabled before or not)
-      - disk templates that get enabled by this operation (thus haven't been
-        enabled before)
-      - disk templates that get disabled by this operation
-
-    """
-    return self._GetDiskTemplateSetsInner(self.op.enabled_disk_templates,
-                                          cluster.enabled_disk_templates)
-
-  def _CheckIpolicy(self, cluster, enabled_disk_templates):
-    """Checks the ipolicy.
-
-    @type cluster: C{objects.Cluster}
-    @param cluster: the cluster's configuration
-    @type enabled_disk_templates: list of string
-    @param enabled_disk_templates: list of (possibly newly) enabled disk
-      templates
-
-    """
-    # FIXME: write unit tests for this
-    if self.op.ipolicy:
-      self.new_ipolicy = GetUpdatedIPolicy(cluster.ipolicy, self.op.ipolicy,
-                                           group_policy=False)
-
-      CheckIpolicyVsDiskTemplates(self.new_ipolicy,
-                                  enabled_disk_templates)
-
-      all_instances = self.cfg.GetAllInstancesInfo().values()
-      violations = set()
-      for group in self.cfg.GetAllNodeGroupsInfo().values():
-        instances = frozenset(
-          [inst for inst in all_instances
-           if compat.any(nuuid in group.members
-           for nuuid in self.cfg.GetInstanceNodes(inst.uuid))])
-        new_ipolicy = objects.FillIPolicy(self.new_ipolicy, group.ipolicy)
-        ipol = masterd.instance.CalculateGroupIPolicy(cluster, group)
-        new = ComputeNewInstanceViolations(ipol, new_ipolicy, instances,
-                                           self.cfg)
-        if new:
-          violations.update(new)
-
-      if violations:
-        self.LogWarning("After the ipolicy change the following instances"
-                        " violate them: %s",
-                        utils.CommaJoin(utils.NiceSort(violations)))
-    else:
-      CheckIpolicyVsDiskTemplates(cluster.ipolicy,
-                                  enabled_disk_templates)
-
-  def _CheckDrbdHelperOnNodes(self, drbd_helper, node_uuids):
-    """Checks whether the set DRBD helper actually exists on the nodes.
-
-    @type drbd_helper: string
-    @param drbd_helper: path of the drbd usermode helper binary
-    @type node_uuids: list of strings
-    @param node_uuids: list of node UUIDs to check for the helper
-
-    """
-    # checks given drbd helper on all nodes
-    helpers = self.rpc.call_drbd_helper(node_uuids)
-    for (_, ninfo) in self.cfg.GetMultiNodeInfo(node_uuids):
-      if ninfo.offline:
-        self.LogInfo("Not checking drbd helper on offline node %s",
-                     ninfo.name)
-        continue
-      msg = helpers[ninfo.uuid].fail_msg
-      if msg:
-        raise errors.OpPrereqError("Error checking drbd helper on node"
-                                   " '%s': %s" % (ninfo.name, msg),
-                                   errors.ECODE_ENVIRON)
-      node_helper = helpers[ninfo.uuid].payload
-      if node_helper != drbd_helper:
-        raise errors.OpPrereqError("Error on node '%s': drbd helper is %s" %
-                                   (ninfo.name, node_helper),
-                                   errors.ECODE_ENVIRON)
-
-  def _CheckDrbdHelper(self, node_uuids, drbd_enabled, drbd_gets_enabled):
-    """Check the DRBD usermode helper.
-
-    @type node_uuids: list of strings
-    @param node_uuids: a list of nodes' UUIDs
-    @type drbd_enabled: boolean
-    @param drbd_enabled: whether DRBD will be enabled after this operation
-      (no matter if it was disabled before or not)
-    @type drbd_gets_enabled: boolean
-    @param drbd_gets_enabled: true if DRBD was disabled before this
-      operation, but will be enabled afterwards
-
-    """
-    if self.op.drbd_helper == '':
-      if drbd_enabled:
-        raise errors.OpPrereqError("Cannot disable drbd helper while"
-                                   " DRBD is enabled.", errors.ECODE_STATE)
-      if self.cfg.HasAnyDiskOfType(constants.DT_DRBD8):
-        raise errors.OpPrereqError("Cannot disable drbd helper while"
-                                   " drbd-based instances exist",
-                                   errors.ECODE_INVAL)
-
-    else:
-      if self.op.drbd_helper is not None and drbd_enabled:
-        self._CheckDrbdHelperOnNodes(self.op.drbd_helper, node_uuids)
-      else:
-        if drbd_gets_enabled:
-          current_drbd_helper = self.cfg.GetClusterInfo().drbd_usermode_helper
-          if current_drbd_helper is not None:
-            self._CheckDrbdHelperOnNodes(current_drbd_helper, node_uuids)
-          else:
-            raise errors.OpPrereqError("Cannot enable DRBD without a"
-                                       " DRBD usermode helper set.",
-                                       errors.ECODE_STATE)
-
-  def _CheckInstancesOfDisabledDiskTemplates(
-      self, disabled_disk_templates):
-    """Check whether we try to disable a disk template that is in use.
-
-    @type disabled_disk_templates: list of string
-    @param disabled_disk_templates: list of disk templates that are going to
-      be disabled by this operation
-
-    """
-    for disk_template in disabled_disk_templates:
-      if self.cfg.HasAnyDiskOfType(disk_template):
-        raise errors.OpPrereqError(
-            "Cannot disable disk template '%s', because there is at least one"
-            " instance using it." % disk_template,
-            errors.ECODE_STATE)
-
-  @staticmethod
-  def _CheckInstanceCommunicationNetwork(network, warning_fn):
-    """Check whether an existing network is configured for instance
-    communication.
-
-    Checks whether an existing network is configured with the
-    parameters that are advisable for instance communication, and
-    otherwise issue security warnings.
-
-    @type network: L{ganeti.objects.Network}
-    @param network: L{ganeti.objects.Network} object whose
-                    configuration is being checked
-    @type warning_fn: function
-    @param warning_fn: function used to print warnings
-    @rtype: None
-    @return: None
-
-    """
-    def _MaybeWarn(err, val, default):
-      if val != default:
-        warning_fn("Supplied instance communication network '%s' %s '%s',"
-                   " this might pose a security risk (default is '%s').",
-                   network.name, err, val, default)
-
-    if network.network is None:
-      raise errors.OpPrereqError("Supplied instance communication network '%s'"
-                                 " must have an IPv4 network address.",
-                                 network.name)
-
-    _MaybeWarn("has an IPv4 gateway", network.gateway, None)
-    _MaybeWarn("has a non-standard IPv4 network address", network.network,
-               constants.INSTANCE_COMMUNICATION_NETWORK4)
-    _MaybeWarn("has an IPv6 gateway", network.gateway6, None)
-    _MaybeWarn("has a non-standard IPv6 network address", network.network6,
-               constants.INSTANCE_COMMUNICATION_NETWORK6)
-    _MaybeWarn("has a non-standard MAC prefix", network.mac_prefix,
-               constants.INSTANCE_COMMUNICATION_MAC_PREFIX)
-
-  def CheckPrereq(self):
-    """Check prerequisites.
-
-    This checks whether the given params don't conflict and
-    if the given volume group is valid.
-
-    """
-    node_uuids = self.owned_locks(locking.LEVEL_NODE)
-    self.cluster = cluster = self.cfg.GetClusterInfo()
-
-    vm_capable_node_uuids = [node.uuid
-                             for node in self.cfg.GetAllNodesInfo().values()
-                             if node.uuid in node_uuids and node.vm_capable]
-
-    (enabled_disk_templates, new_enabled_disk_templates,
-      disabled_disk_templates) = self._GetDiskTemplateSets(cluster)
-    self._CheckInstancesOfDisabledDiskTemplates(disabled_disk_templates)
-
-    self._CheckVgName(vm_capable_node_uuids, enabled_disk_templates,
-                      new_enabled_disk_templates)
-
-    if self.op.file_storage_dir is not None:
-      CheckFileStoragePathVsEnabledDiskTemplates(
-          self.LogWarning, self.op.file_storage_dir, enabled_disk_templates)
-
-    if self.op.shared_file_storage_dir is not None:
-      CheckSharedFileStoragePathVsEnabledDiskTemplates(
-          self.LogWarning, self.op.shared_file_storage_dir,
-          enabled_disk_templates)
-
-    drbd_enabled = constants.DT_DRBD8 in enabled_disk_templates
-    drbd_gets_enabled = constants.DT_DRBD8 in new_enabled_disk_templates
-    self._CheckDrbdHelper(vm_capable_node_uuids,
-                          drbd_enabled, drbd_gets_enabled)
-
-    # validate params changes
-    if self.op.beparams:
-      objects.UpgradeBeParams(self.op.beparams)
-      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
-      self.new_beparams = cluster.SimpleFillBE(self.op.beparams)
-
-    if self.op.ndparams:
-      utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
-      self.new_ndparams = cluster.SimpleFillND(self.op.ndparams)
-
-      # TODO: we need a more general way to handle resetting
-      # cluster-level parameters to default values
-      if self.new_ndparams["oob_program"] == "":
-        self.new_ndparams["oob_program"] = \
-            constants.NDC_DEFAULTS[constants.ND_OOB_PROGRAM]
-
-    if self.op.hv_state:
-      new_hv_state = MergeAndVerifyHvState(self.op.hv_state,
-                                           self.cluster.hv_state_static)
-      self.new_hv_state = dict((hv, cluster.SimpleFillHvState(values))
-                               for hv, values in new_hv_state.items())
-
-    if self.op.disk_state:
-      new_disk_state = MergeAndVerifyDiskState(self.op.disk_state,
-                                               self.cluster.disk_state_static)
-      self.new_disk_state = \
-        dict((storage, dict((name, cluster.SimpleFillDiskState(values))
-                            for name, values in svalues.items()))
-             for storage, svalues in new_disk_state.items())
-
-    self._CheckIpolicy(cluster, enabled_disk_templates)
-
-    if self.op.nicparams:
-      utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
-      self.new_nicparams = cluster.SimpleFillNIC(self.op.nicparams)
-      objects.NIC.CheckParameterSyntax(self.new_nicparams)
-      nic_errors = []
-
-      # check all instances for consistency
-      for instance in self.cfg.GetAllInstancesInfo().values():
-        for nic_idx, nic in enumerate(instance.nics):
-          params_copy = copy.deepcopy(nic.nicparams)
-          params_filled = objects.FillDict(self.new_nicparams, params_copy)
-
-          # check parameter syntax
-          try:
-            objects.NIC.CheckParameterSyntax(params_filled)
-          except errors.ConfigurationError, err:
-            nic_errors.append("Instance %s, nic/%d: %s" %
-                              (instance.name, nic_idx, err))
-
-          # if we're moving instances to routed, check that they have an ip
-          target_mode = params_filled[constants.NIC_MODE]
-          if target_mode == constants.NIC_MODE_ROUTED and not nic.ip:
-            nic_errors.append("Instance %s, nic/%d: routed NIC with no ip"
-                              " address" % (instance.name, nic_idx))
-      if nic_errors:
-        raise errors.OpPrereqError("Cannot apply the change, errors:\n%s" %
-                                   "\n".join(nic_errors), errors.ECODE_INVAL)
-
-    # hypervisor list/parameters
-    self.new_hvparams = new_hvp = objects.FillDict(cluster.hvparams, {})
-    if self.op.hvparams:
-      for hv_name, hv_dict in self.op.hvparams.items():
-        if hv_name not in self.new_hvparams:
-          self.new_hvparams[hv_name] = hv_dict
-        else:
-          self.new_hvparams[hv_name].update(hv_dict)
-
-    # disk template parameters
-    self.new_diskparams = objects.FillDict(cluster.diskparams, {})
-    if self.op.diskparams:
-      for dt_name, dt_params in self.op.diskparams.items():
-        if dt_name not in self.new_diskparams:
-          self.new_diskparams[dt_name] = dt_params
-        else:
-          self.new_diskparams[dt_name].update(dt_params)
-      CheckDiskAccessModeConsistency(self.op.diskparams, self.cfg)
-
-    # os hypervisor parameters
-    self.new_os_hvp = objects.FillDict(cluster.os_hvp, {})
-    if self.op.os_hvp:
-      for os_name, hvs in self.op.os_hvp.items():
-        if os_name not in self.new_os_hvp:
-          self.new_os_hvp[os_name] = hvs
-        else:
-          for hv_name, hv_dict in hvs.items():
-            if hv_dict is None:
-              # Delete if it exists
-              self.new_os_hvp[os_name].pop(hv_name, None)
-            elif hv_name not in self.new_os_hvp[os_name]:
-              self.new_os_hvp[os_name][hv_name] = hv_dict
-            else:
-              self.new_os_hvp[os_name][hv_name].update(hv_dict)
-
-    # os parameters
-    self._BuildOSParams(cluster)
-
-    # changes to the hypervisor list
-    if self.op.enabled_hypervisors is not None:
-      for hv in self.op.enabled_hypervisors:
-        # if the hypervisor doesn't already exist in the cluster
-        # hvparams, we initialize it to empty, and then (in both
-        # cases) we make sure to fill the defaults, as we might not
-        # have a complete defaults list if the hypervisor wasn't
-        # enabled before
-        if hv not in new_hvp:
-          new_hvp[hv] = {}
-        new_hvp[hv] = objects.FillDict(constants.HVC_DEFAULTS[hv], new_hvp[hv])
-        utils.ForceDictType(new_hvp[hv], constants.HVS_PARAMETER_TYPES)
-
-    if self.op.hvparams or self.op.enabled_hypervisors is not None:
-      # either the enabled list has changed, or the parameters have, validate
-      for hv_name, hv_params in self.new_hvparams.items():
-        if ((self.op.hvparams and hv_name in self.op.hvparams) or
-            (self.op.enabled_hypervisors and
-             hv_name in self.op.enabled_hypervisors)):
-          # either this is a new hypervisor, or its parameters have changed
-          hv_class = hypervisor.GetHypervisorClass(hv_name)
-          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
-          hv_class.CheckParameterSyntax(hv_params)
-          CheckHVParams(self, node_uuids, hv_name, hv_params)
-
-    self._CheckDiskTemplateConsistency()
-
-    if self.op.os_hvp:
-      # no need to check any newly-enabled hypervisors, since the
-      # defaults have already been checked in the above code-block
-      for os_name, os_hvp in self.new_os_hvp.items():
-        for hv_name, hv_params in os_hvp.items():
-          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
-          # we need to fill in the new os_hvp on top of the actual hv_p
-          cluster_defaults = self.new_hvparams.get(hv_name, {})
-          new_osp = objects.FillDict(cluster_defaults, hv_params)
-          hv_class = hypervisor.GetHypervisorClass(hv_name)
-          hv_class.CheckParameterSyntax(new_osp)
-          CheckHVParams(self, node_uuids, hv_name, new_osp)
-
-    if self.op.default_iallocator:
-      alloc_script = utils.FindFile(self.op.default_iallocator,
-                                    constants.IALLOCATOR_SEARCH_PATH,
-                                    os.path.isfile)
-      if alloc_script is None:
-        raise errors.OpPrereqError("Invalid default iallocator script '%s'"
-                                   " specified" % self.op.default_iallocator,
-                                   errors.ECODE_INVAL)
-
-    if self.op.instance_communication_network:
-      network_name = self.op.instance_communication_network
-
-      try:
-        network_uuid = self.cfg.LookupNetwork(network_name)
-      except errors.OpPrereqError:
-        network_uuid = None
-
-      if network_uuid is not None:
-        network = self.cfg.GetNetwork(network_uuid)
-        self._CheckInstanceCommunicationNetwork(network, self.LogWarning)
-
-    if self.op.compression_tools:
-      CheckCompressionTools(self.op.compression_tools)
-
-  def _BuildOSParams(self, cluster):
-    "Calculate the new OS parameters for this operation."
-
-    def _GetNewParams(source, new_params):
-      "Wrapper around GetUpdatedParams."
-      if new_params is None:
-        return source
-      result = objects.FillDict(source, {}) # deep copy of source
-      for os_name in new_params:
-        result[os_name] = GetUpdatedParams(result.get(os_name, {}),
-                                           new_params[os_name],
-                                           use_none=True)
-        if not result[os_name]:
-          del result[os_name] # we removed all parameters
-      return result
-
-    self.new_osp = _GetNewParams(cluster.osparams,
-                                 self.op.osparams)
-    self.new_osp_private = _GetNewParams(cluster.osparams_private_cluster,
-                                         self.op.osparams_private_cluster)
-
-    # note that the OS itself is not validated here, only the new parameters
-    changed_oses = (set(self.new_osp.keys()) | set(self.new_osp_private.keys()))
-    for os_name in changed_oses:
-      os_params = cluster.SimpleFillOS(
-        os_name,
-        self.new_osp.get(os_name, {}),
-        os_params_private=self.new_osp_private.get(os_name, {})
-      )
-      # check the parameter validity (remote check)
-      CheckOSParams(self, False, [self.cfg.GetMasterNode()],
-                    os_name, os_params, False)
-
-  def _CheckDiskTemplateConsistency(self):
-    """Check whether the disk templates that are going to be disabled
-       are still in use by some instances.
-
-    """
-    if self.op.enabled_disk_templates:
-      cluster = self.cfg.GetClusterInfo()
-      instances = self.cfg.GetAllInstancesInfo()
-
-      disk_templates_to_remove = set(cluster.enabled_disk_templates) \
-        - set(self.op.enabled_disk_templates)
-      for instance in instances.itervalues():
-        if instance.disk_template in disk_templates_to_remove:
-          raise errors.OpPrereqError("Cannot disable disk template '%s',"
-                                     " because instance '%s' is using it." %
-                                     (instance.disk_template, instance.name))
-
-  def _SetVgName(self, feedback_fn):
-    """Determines and sets the new volume group name.
-
-    """
-    if self.op.vg_name is not None:
-      new_volume = self.op.vg_name
-      if not new_volume:
-        new_volume = None
-      if new_volume != self.cfg.GetVGName():
-        self.cfg.SetVGName(new_volume)
-      else:
-        feedback_fn("Cluster LVM configuration already in desired"
-                    " state, not changing")
-
-  def _SetFileStorageDir(self, feedback_fn):
-    """Set the file storage directory.
-
-    """
-    if self.op.file_storage_dir is not None:
-      if self.cluster.file_storage_dir == self.op.file_storage_dir:
-        feedback_fn("Global file storage dir already set to value '%s'"
-                    % self.cluster.file_storage_dir)
-      else:
-        self.cluster.file_storage_dir = self.op.file_storage_dir
-
-  def _SetSharedFileStorageDir(self, feedback_fn):
-    """Set the shared file storage directory.
-
-    """
-    if self.op.shared_file_storage_dir is not None:
-      if self.cluster.shared_file_storage_dir == \
-          self.op.shared_file_storage_dir:
-        feedback_fn("Global shared file storage dir already set to value '%s'"
-                    % self.cluster.shared_file_storage_dir)
-      else:
-        self.cluster.shared_file_storage_dir = self.op.shared_file_storage_dir
+import itertools
+import logging
+import operator
+import re
+import time
+import ganeti.masterd.instance
+import ganeti.rpc.node as rpc
 
-  def _SetDrbdHelper(self, feedback_fn):
-    """Set the DRBD usermode helper.
+from ganeti import compat
+from ganeti import constants
+from ganeti import errors
+from ganeti import locking
+from ganeti import pathutils
+from ganeti import utils
+from ganeti import vcluster
+from ganeti import hypervisor
+from ganeti import opcodes
 
-    """
-    if self.op.drbd_helper is not None:
-      if constants.DT_DRBD8 not in self.cluster.enabled_disk_templates:
-        feedback_fn("Note that you specified a DRBD usermode helper, but did"
-                    " not enable the drbd disk template.")
-      new_helper = self.op.drbd_helper
-      if not new_helper:
-        new_helper = None
-      if new_helper != self.cfg.GetDRBDHelper():
-        self.cfg.SetDRBDHelper(new_helper)
-      else:
-        feedback_fn("Cluster DRBD helper already in desired state,"
-                    " not changing")
+from ganeti.cmdlib.base import LogicalUnit, NoHooksLU, ResultWithJobs
+from ganeti.cmdlib.common import ShareAll, ComputeAncillaryFiles, \
+    CheckNodePVs, ComputeIPolicyInstanceViolation, AnnotateDiskParams, \
+    SupportsOob
 
-  @staticmethod
-  def _EnsureInstanceCommunicationNetwork(cfg, network_name):
-    """Ensure that the instance communication network exists and is
-    connected to all groups.
 
-    The instance communication network given by L{network_name} is
-    created, if necessary, via the opcode 'OpNetworkAdd'.  Also, the
-    instance communication network is connected to all existing node
-    groups, if necessary, via the opcode 'OpNetworkConnect'.
+def _GetAllHypervisorParameters(cluster, instances):
+  """Compute the set of all hypervisor parameters.
 
-    @type cfg: L{config.ConfigWriter}
-    @param cfg: cluster configuration
+  @type cluster: L{objects.Cluster}
+  @param cluster: the cluster object
+  @type instances: list of L{objects.Instance}
+  @param instances: additional instances from which to obtain parameters
+  @rtype: list of (origin, hypervisor, parameters)
+  @return: a list with all parameters found, indicating the hypervisor they
+       apply to, and the origin (can be "cluster", "os X", or "instance Y")
 
-    @type network_name: string
-    @param network_name: instance communication network name
+  """
+  hvp_data = []
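+  # each entry is a tuple of (origin, hypervisor name, filled parameter dict)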
 
-    @rtype: L{ganeti.cmdlib.ResultWithJobs} or L{None}
-    @return: L{ganeti.cmdlib.ResultWithJobs} if the instance
-             communication network needs to be created or connected to a
-             group, otherwise L{None}
+  for hv_name in cluster.enabled_hypervisors:
+    hvp_data.append(("cluster", hv_name, cluster.GetHVDefaults(hv_name)))
 
-    """
-    jobs = []
+  for os_name, os_hvp in cluster.os_hvp.items():
+    for hv_name, hv_params in os_hvp.items():
+      if hv_params:
+        full_params = cluster.GetHVDefaults(hv_name, os_name=os_name)
+        hvp_data.append(("os %s" % os_name, hv_name, full_params))
 
-    try:
-      network_uuid = cfg.LookupNetwork(network_name)
-      network_exists = True
-    except errors.OpPrereqError:
-      network_exists = False
+  # TODO: collapse identical parameter values into a single one
+  for instance in instances:
+    if instance.hvparams:
+      hvp_data.append(("instance %s" % instance.name, instance.hypervisor,
+                       cluster.FillHV(instance)))
 
-    if not network_exists:
-      jobs.append(AddInstanceCommunicationNetworkOp(network_name))
+  return hvp_data
 
-    for group_uuid in cfg.GetNodeGroupList():
-      group = cfg.GetNodeGroup(group_uuid)
 
-      if network_exists:
-        network_connected = network_uuid in group.networks
-      else:
-        # The network was created asynchronously by the previous
-        # opcode and, therefore, we don't have access to its
-        # network_uuid.  As a result, we assume that the network is
-        # not connected to any group yet.
-        network_connected = False
-
-      if not network_connected:
-        op = ConnectInstanceCommunicationNetworkOp(group_uuid, network_name)
-        jobs.append(op)
-
-    if jobs:
-      return ResultWithJobs([jobs])
-    else:
-      return None
+class _VerifyErrors(object):
+  """Mix-in for cluster/group verify LUs.
 
-  @staticmethod
-  def _ModifyInstanceCommunicationNetwork(cfg, network_name, feedback_fn):
-    """Update the instance communication network stored in the cluster
-    configuration.
+  It provides _Error and _ErrorIf, and updates the self.bad boolean. (Expects
+  self.op and self._feedback_fn to be available.)
 
-    Compares the user-supplied instance communication network against
-    the one stored in the Ganeti cluster configuration.  If there is a
-    change, the instance communication network may be possibly created
-    and connected to all groups (see
-    L{LUClusterSetParams._EnsureInstanceCommunicationNetwork}).
+  """
 
-    @type cfg: L{config.ConfigWriter}
-    @param cfg: cluster configuration
+  ETYPE_FIELD = "code"
+  ETYPE_ERROR = constants.CV_ERROR
+  ETYPE_WARNING = constants.CV_WARNING
 
-    @type network_name: string
-    @param network_name: instance communication network name
+  def _Error(self, ecode, item, msg, *args, **kwargs):
+    """Format an error message.
 
-    @type feedback_fn: function
-    @param feedback_fn: see L{ganeti.cmdlib.base.LogicalUnit}
+    Based on the opcode's error_codes parameter, either format a
+    parseable error code, or a simpler error string.
 
-    @rtype: L{LUClusterSetParams._EnsureInstanceCommunicationNetwork} or L{None}
-    @return: see L{LUClusterSetParams._EnsureInstanceCommunicationNetwork}
+    This must be called only from Exec and functions called from Exec.
 
     """
-    config_network_name = cfg.GetInstanceCommunicationNetwork()
-
-    if network_name == config_network_name:
-      feedback_fn("Instance communication network already is '%s', nothing to"
-                  " do." % network_name)
+    ltype = kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR)
+    itype, etxt, _ = ecode
+    # If the error code is in the list of ignored errors, demote the error to a
+    # warning
+    if etxt in self.op.ignore_errors:     # pylint: disable=E1101
+      ltype = self.ETYPE_WARNING
+    # first complete the msg
+    if args:
+      msg = msg % args
+    # then format the whole message
+    if self.op.error_codes: # This is a mix-in. pylint: disable=E1101
+      msg = "%s:%s:%s:%s:%s" % (ltype, etxt, itype, item, msg)
     else:
-      try:
-        cfg.LookupNetwork(config_network_name)
-        feedback_fn("Previous instance communication network '%s'"
-                    " should be removed manually." % config_network_name)
-      except errors.OpPrereqError:
-        pass
-
-      if network_name:
-        feedback_fn("Changing instance communication network to '%s', only new"
-                    " instances will be affected."
-                    % network_name)
-      else:
-        feedback_fn("Disabling instance communication network, only new"
-                    " instances will be affected.")
-
-      cfg.SetInstanceCommunicationNetwork(network_name)
-
-      if network_name:
-        return LUClusterSetParams._EnsureInstanceCommunicationNetwork(
-          cfg,
-          network_name)
+      if item:
+        item = " " + item
       else:
-        return None
+        item = ""
+      msg = "%s: %s%s: %s" % (ltype, itype, item, msg)
+    # and finally report it via the feedback_fn
+    self._feedback_fn("  - %s" % msg) # Mix-in. pylint: disable=E1101
+    # do not mark the operation as failed when only warnings are raised
+    if ltype == self.ETYPE_ERROR:
+      self.bad = True
 
-  def Exec(self, feedback_fn):
-    """Change the parameters of the cluster.
+  def _ErrorIf(self, cond, *args, **kwargs):
+    """Log an error message if the passed condition is True.
 
     """
-    # re-read the fresh configuration
-    self.cluster = self.cfg.GetClusterInfo()
-    if self.op.enabled_disk_templates:
-      self.cluster.enabled_disk_templates = \
-        list(self.op.enabled_disk_templates)
-    # save the changes
-    self.cfg.Update(self.cluster, feedback_fn)
-
-    self._SetVgName(feedback_fn)
-
-    self.cluster = self.cfg.GetClusterInfo()
-    self._SetFileStorageDir(feedback_fn)
-    self._SetSharedFileStorageDir(feedback_fn)
-    self.cfg.Update(self.cluster, feedback_fn)
-    self._SetDrbdHelper(feedback_fn)
-
-    # re-read the fresh configuration again
-    self.cluster = self.cfg.GetClusterInfo()
-
-    ensure_kvmd = False
-
-    active = constants.DATA_COLLECTOR_STATE_ACTIVE
-    if self.op.enabled_data_collectors is not None:
-      for name, val in self.op.enabled_data_collectors.items():
-        self.cluster.data_collectors[name][active] = val
-
-    if self.op.data_collector_interval:
-      internal = constants.DATA_COLLECTOR_PARAMETER_INTERVAL
-      for name, val in self.op.data_collector_interval.items():
-        self.cluster.data_collectors[name][internal] = int(val)
-
-    if self.op.hvparams:
-      self.cluster.hvparams = self.new_hvparams
-    if self.op.os_hvp:
-      self.cluster.os_hvp = self.new_os_hvp
-    if self.op.enabled_hypervisors is not None:
-      self.cluster.hvparams = self.new_hvparams
-      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
-      ensure_kvmd = True
-    if self.op.beparams:
-      self.cluster.beparams[constants.PP_DEFAULT] = self.new_beparams
-    if self.op.nicparams:
-      self.cluster.nicparams[constants.PP_DEFAULT] = self.new_nicparams
-    if self.op.ipolicy:
-      self.cluster.ipolicy = self.new_ipolicy
-    if self.op.osparams:
-      self.cluster.osparams = self.new_osp
-    if self.op.osparams_private_cluster:
-      self.cluster.osparams_private_cluster = self.new_osp_private
-    if self.op.ndparams:
-      self.cluster.ndparams = self.new_ndparams
-    if self.op.diskparams:
-      self.cluster.diskparams = self.new_diskparams
-    if self.op.hv_state:
-      self.cluster.hv_state_static = self.new_hv_state
-    if self.op.disk_state:
-      self.cluster.disk_state_static = self.new_disk_state
-
-    if self.op.candidate_pool_size is not None:
-      self.cluster.candidate_pool_size = self.op.candidate_pool_size
-      # we need to update the pool size here, otherwise the save will fail
-      AdjustCandidatePool(self, [])
-
-    if self.op.max_running_jobs is not None:
-      self.cluster.max_running_jobs = self.op.max_running_jobs
-
-    if self.op.max_tracked_jobs is not None:
-      self.cluster.max_tracked_jobs = self.op.max_tracked_jobs
-
-    if self.op.maintain_node_health is not None:
-      self.cluster.maintain_node_health = self.op.maintain_node_health
-
-    if self.op.modify_etc_hosts is not None:
-      self.cluster.modify_etc_hosts = self.op.modify_etc_hosts
-
-    if self.op.prealloc_wipe_disks is not None:
-      self.cluster.prealloc_wipe_disks = self.op.prealloc_wipe_disks
-
-    if self.op.add_uids is not None:
-      uidpool.AddToUidPool(self.cluster.uid_pool, self.op.add_uids)
-
-    if self.op.remove_uids is not None:
-      uidpool.RemoveFromUidPool(self.cluster.uid_pool, self.op.remove_uids)
-
-    if self.op.uid_pool is not None:
-      self.cluster.uid_pool = self.op.uid_pool
-
-    if self.op.default_iallocator is not None:
-      self.cluster.default_iallocator = self.op.default_iallocator
-
-    if self.op.default_iallocator_params is not None:
-      self.cluster.default_iallocator_params = self.op.default_iallocator_params
-
-    if self.op.reserved_lvs is not None:
-      self.cluster.reserved_lvs = self.op.reserved_lvs
-
-    if self.op.use_external_mip_script is not None:
-      self.cluster.use_external_mip_script = self.op.use_external_mip_script
-
-    if self.op.enabled_user_shutdown is not None and \
-          self.cluster.enabled_user_shutdown != self.op.enabled_user_shutdown:
-      self.cluster.enabled_user_shutdown = self.op.enabled_user_shutdown
-      ensure_kvmd = True
-
-    def helper_os(aname, mods, desc):
-      desc += " OS list"
-      lst = getattr(self.cluster, aname)
-      for key, val in mods:
-        if key == constants.DDM_ADD:
-          if val in lst:
-            feedback_fn("OS %s already in %s, ignoring" % (val, desc))
-          else:
-            lst.append(val)
-        elif key == constants.DDM_REMOVE:
-          if val in lst:
-            lst.remove(val)
-          else:
-            feedback_fn("OS %s not found in %s, ignoring" % (val, desc))
-        else:
-          raise errors.ProgrammerError("Invalid modification '%s'" % key)
-
-    if self.op.hidden_os:
-      helper_os("hidden_os", self.op.hidden_os, "hidden")
-
-    if self.op.blacklisted_os:
-      helper_os("blacklisted_os", self.op.blacklisted_os, "blacklisted")
-
-    if self.op.mac_prefix:
-      self.cluster.mac_prefix = self.op.mac_prefix
-
-    if self.op.master_netdev:
-      master_params = self.cfg.GetMasterNetworkParameters()
-      ems = self.cfg.GetUseExternalMipScript()
-      feedback_fn("Shutting down master ip on the current netdev (%s)" %
-                  self.cluster.master_netdev)
-      result = self.rpc.call_node_deactivate_master_ip(master_params.uuid,
-                                                       master_params, ems)
-      if not self.op.force:
-        result.Raise("Could not disable the master ip")
-      else:
-        if result.fail_msg:
-          msg = ("Could not disable the master ip (continuing anyway): %s" %
-                 result.fail_msg)
-          feedback_fn(msg)
-      feedback_fn("Changing master_netdev from %s to %s" %
-                  (master_params.netdev, self.op.master_netdev))
-      self.cluster.master_netdev = self.op.master_netdev
-
-    if self.op.master_netmask:
-      master_params = self.cfg.GetMasterNetworkParameters()
-      feedback_fn("Changing master IP netmask to %s" % self.op.master_netmask)
-      result = self.rpc.call_node_change_master_netmask(
-                 master_params.uuid, master_params.netmask,
-                 self.op.master_netmask, master_params.ip,
-                 master_params.netdev)
-      result.Warn("Could not change the master IP netmask", feedback_fn)
-      self.cluster.master_netmask = self.op.master_netmask
-
-    if self.op.install_image:
-      self.cluster.install_image = self.op.install_image
-
-    if self.op.zeroing_image is not None:
-      CheckImageValidity(self.op.zeroing_image,
-                         "Zeroing image must be an absolute path or a URL")
-      self.cluster.zeroing_image = self.op.zeroing_image
-
-    self.cfg.Update(self.cluster, feedback_fn)
-
-    if self.op.master_netdev:
-      master_params = self.cfg.GetMasterNetworkParameters()
-      feedback_fn("Starting the master ip on the new master netdev (%s)" %
-                  self.op.master_netdev)
-      ems = self.cfg.GetUseExternalMipScript()
-      result = self.rpc.call_node_activate_master_ip(master_params.uuid,
-                                                     master_params, ems)
-      result.Warn("Could not re-enable the master ip on the master,"
-                  " please restart manually", self.LogWarning)
-
-    # Even though 'self.op.enabled_user_shutdown' is being tested
-    # above, the RPCs can only be done after 'self.cfg.Update' because
-    # this will update the cluster object and sync 'Ssconf', and kvmd
-    # uses 'Ssconf'.
-    if ensure_kvmd:
-      EnsureKvmdOnNodes(self, feedback_fn)
-
-    if self.op.compression_tools is not None:
-      self.cfg.SetCompressionTools(self.op.compression_tools)
-
-    network_name = self.op.instance_communication_network
-    if network_name is not None:
-      return self._ModifyInstanceCommunicationNetwork(self.cfg,
-                                                      network_name, feedback_fn)
-    else:
-      return None
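+    # debug_simulate_errors forces every check to report, so all error
+    # paths can be exercised during testing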
+    if (bool(cond)
+        or self.op.debug_simulate_errors): # pylint: disable=E1101
+      self._Error(*args, **kwargs)
 
 
 class LUClusterVerify(NoHooksLU):
@@ -1867,90 +185,24 @@ class LUClusterVerify(NoHooksLU):
     return ResultWithJobs(jobs)
 
 
-class _VerifyErrors(object):
-  """Mix-in for cluster/group verify LUs.
-
-  It provides _Error and _ErrorIf, and updates the self.bad boolean. (Expects
-  self.op and self._feedback_fn to be available.)
-
-  """
-
-  ETYPE_FIELD = "code"
-  ETYPE_ERROR = constants.CV_ERROR
-  ETYPE_WARNING = constants.CV_WARNING
-
-  def _Error(self, ecode, item, msg, *args, **kwargs):
-    """Format an error message.
-
-    Based on the opcode's error_codes parameter, either format a
-    parseable error code, or a simpler error string.
-
-    This must be called only from Exec and functions called from Exec.
-
-    """
-    ltype = kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR)
-    itype, etxt, _ = ecode
-    # If the error code is in the list of ignored errors, demote the error to a
-    # warning
-    if etxt in self.op.ignore_errors:     # pylint: disable=E1101
-      ltype = self.ETYPE_WARNING
-    # first complete the msg
-    if args:
-      msg = msg % args
-    # then format the whole message
-    if self.op.error_codes: # This is a mix-in. pylint: disable=E1101
-      msg = "%s:%s:%s:%s:%s" % (ltype, etxt, itype, item, msg)
-    else:
-      if item:
-        item = " " + item
-      else:
-        item = ""
-      msg = "%s: %s%s: %s" % (ltype, itype, item, msg)
-    # and finally report it via the feedback_fn
-    self._feedback_fn("  - %s" % msg) # Mix-in. pylint: disable=E1101
-    # do not mark the operation as failed when only warnings are raised
-    if ltype == self.ETYPE_ERROR:
-      self.bad = True
-
-  def _ErrorIf(self, cond, *args, **kwargs):
-    """Log an error message if the passed condition is True.
-
-    """
-    if (bool(cond)
-        or self.op.debug_simulate_errors): # pylint: disable=E1101
-      self._Error(*args, **kwargs)
-
-
-def _GetAllHypervisorParameters(cluster, instances):
-  """Compute the set of all hypervisor parameters.
-
-  @type cluster: L{objects.Cluster}
-  @param cluster: the cluster object
-  @type instances: list of L{objects.Instance}
-  @param instances: additional instances from which to obtain parameters
-  @rtype: list of (origin, hypervisor, parameters)
-  @return: a list with all parameters found, indicating the hypervisor they
-       apply to, and the origin (can be "cluster", "os X", or "instance Y")
+class LUClusterVerifyDisks(NoHooksLU):
+  """Verifies the cluster disks status.
 
   """
-  hvp_data = []
-
-  for hv_name in cluster.enabled_hypervisors:
-    hvp_data.append(("cluster", hv_name, cluster.GetHVDefaults(hv_name)))
+  REQ_BGL = False
 
-  for os_name, os_hvp in cluster.os_hvp.items():
-    for hv_name, hv_params in os_hvp.items():
-      if hv_params:
-        full_params = cluster.GetHVDefaults(hv_name, os_name=os_name)
-        hvp_data.append(("os %s" % os_name, hv_name, full_params))
+  def ExpandNames(self):
+    self.share_locks = ShareAll()
+    self.needed_locks = {
+      locking.LEVEL_NODEGROUP: locking.ALL_SET,
+      }
 
-  # TODO: collapse identical parameter values into a single one
-  for instance in instances:
-    if instance.hvparams:
-      hvp_data.append(("instance %s" % instance.name, instance.hypervisor,
-                       cluster.FillHV(instance)))
+  def Exec(self, feedback_fn):
+    group_names = self.owned_locks(locking.LEVEL_NODEGROUP)
 
-  return hvp_data
+    # Submit one instance of L{opcodes.OpGroupVerifyDisks} per node group
+    return ResultWithJobs([[opcodes.OpGroupVerifyDisks(group_name=group)]
+                           for group in group_names])
 
 
 class LUClusterVerifyConfig(NoHooksLU, _VerifyErrors):
@@ -2133,11 +385,6 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
       locking.LEVEL_INSTANCE: self.cfg.GetInstanceNames(inst_uuids),
       locking.LEVEL_NODEGROUP: [self.group_uuid],
       locking.LEVEL_NODE: [],
-
-      # This opcode is run by watcher every five minutes and acquires all nodes
-      # for a group. It doesn't run for a long time, so it's better to acquire
-      # the node allocation lock as well.
-      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
       }
 
     self.share_locks = ShareAll()
@@ -2155,7 +402,8 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
       for inst_name in self.owned_locks(locking.LEVEL_INSTANCE):
         # Important: access only the instances whose lock is owned
         instance = self.cfg.GetInstanceInfoByName(inst_name)
-        if instance.disk_template in constants.DTS_INT_MIRROR:
+        disks = self.cfg.GetInstanceDisks(instance.uuid)
+        if utils.AnyDiskOfType(disks, constants.DTS_INT_MIRROR):
           nodes.update(self.cfg.GetInstanceSecondaryNodes(instance.uuid))
 
       self.needed_locks[locking.LEVEL_NODE] = nodes
@@ -2190,6 +438,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
 
     self.all_node_info = self.cfg.GetAllNodesInfo()
     self.all_inst_info = self.cfg.GetAllInstancesInfo()
+    self.all_disks_info = self.cfg.GetAllDisksInfo()
 
     self.my_node_uuids = group_node_uuids
     self.my_node_info = dict((node_uuid, self.all_node_info[node_uuid])
@@ -2204,7 +453,8 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
     extra_lv_nodes = set()
 
     for inst in self.my_inst_info.values():
-      if inst.disk_template in constants.DTS_INT_MIRROR:
+      disks = self.cfg.GetInstanceDisks(inst.uuid)
+      if utils.AnyDiskOfType(disks, constants.DTS_INT_MIRROR):
         inst_nodes = self.cfg.GetInstanceNodes(inst.uuid)
         for nuuid in inst_nodes:
           if self.all_node_info[nuuid].group != self.group_uuid:
@@ -2566,26 +816,29 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
 
     inst_nodes = self.cfg.GetInstanceNodes(instance.uuid)
     es_flags = rpc.GetExclusiveStorageForNodes(self.cfg, inst_nodes)
+    disks = self.cfg.GetInstanceDisks(instance.uuid)
     if any(es_flags.values()):
-      if instance.disk_template not in constants.DTS_EXCL_STORAGE:
+      if not utils.AllDiskOfType(disks, constants.DTS_EXCL_STORAGE):
         # Disk template not compatible with exclusive_storage: no instance
         # node should have the flag set
         es_nodes = [n
                     for (n, es) in es_flags.items()
                     if es]
+        unsupported = [d.dev_type for d in disks
+                       if d.dev_type not in constants.DTS_EXCL_STORAGE]
         self._Error(constants.CV_EINSTANCEUNSUITABLENODE, instance.name,
-                    "instance has template %s, which is not supported on nodes"
-                    " that have exclusive storage set: %s",
-                    instance.disk_template,
+                    "instance uses disk types %s, which are not supported on"
+                    " nodes that have exclusive storage set: %s",
+                    utils.CommaJoin(unsupported),
                     utils.CommaJoin(self.cfg.GetNodeNames(es_nodes)))
-      for (idx, disk) in enumerate(self.cfg.GetInstanceDisks(instance.uuid)):
+      for (idx, disk) in enumerate(disks):
         self._ErrorIf(disk.spindles is None,
                       constants.CV_EINSTANCEMISSINGCFGPARAMETER, instance.name,
                       "number of spindles not configured for disk %s while"
                       " exclusive storage is enabled, try running"
                       " gnt-cluster repair-disk-sizes", idx)
 
-    if instance.disk_template in constants.DTS_INT_MIRROR:
+    if utils.AnyDiskOfType(disks, constants.DTS_INT_MIRROR):
       instance_nodes = utils.NiceSort(inst_nodes)
       instance_groups = {}
 
@@ -2935,14 +1188,51 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
         self._ErrorIf(test, constants.CV_ENODEDRBDHELPER, ninfo.name,
                       "wrong drbd usermode helper: %s", payload)
 
-  def _VerifyNodeDrbd(self, ninfo, nresult, instanceinfo, drbd_helper,
-                      drbd_map):
+  @staticmethod
+  def _ComputeDrbdMinors(ninfo, instanceinfo, disks_info, drbd_map, error_if):
+    """Gives the DRBD information in a map for a node.
+
+    @type ninfo: L{objects.Node}
+    @param ninfo: the node to check
+    @param instanceinfo: the dict of instances
+    @param disks_info: the dict of disks
+    @param drbd_map: the DRBD map as returned by
+        L{ganeti.config.ConfigWriter.ComputeDRBDMap}
+    @type error_if: callable like L{_ErrorIf}
+    @param error_if: The error reporting function
+    @return: dict from minor number to (disk_uuid, instance_uuid, active)
+
+    """
+    node_drbd = {}
+    for minor, disk_uuid in drbd_map[ninfo.uuid].items():
+      test = disk_uuid not in disks_info
+      error_if(test, constants.CV_ECLUSTERCFG, None,
+               "ghost disk '%s' in temporary DRBD map", disk_uuid)
+      # ghost disk should not be active, but otherwise we
+      # don't give double warnings (both ghost disk and
+      # unallocated minor in use)
+      if test:
+        node_drbd[minor] = (disk_uuid, None, False)
+      else:
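+        # find the instance (if any) the disk is attached to, and whether that
+        # instance's disks are currently expected to be active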
+        disk_active = False
+        disk_instance = None
+        for (inst_uuid, inst) in instanceinfo.items():
+          if disk_uuid in inst.disks:
+            disk_active = inst.disks_active
+            disk_instance = inst_uuid
+            break
+        node_drbd[minor] = (disk_uuid, disk_instance, disk_active)
+    return node_drbd
+
+  def _VerifyNodeDrbd(self, ninfo, nresult, instanceinfo, disks_info,
+                      drbd_helper, drbd_map):
     """Verifies and the node DRBD status.
 
     @type ninfo: L{objects.Node}
     @param ninfo: the node to check
     @param nresult: the remote results for the node
     @param instanceinfo: the dict of instances
+    @param disks_info: the dict of disks
     @param drbd_helper: the configured DRBD usermode helper
     @param drbd_map: the DRBD map as returned by
         L{ganeti.config.ConfigWriter.ComputeDRBDMap}
@@ -2951,19 +1241,8 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
     self._VerifyNodeDrbdHelper(ninfo, nresult, drbd_helper)
 
     # compute the DRBD minors
-    node_drbd = {}
-    for minor, inst_uuid in drbd_map[ninfo.uuid].items():
-      test = inst_uuid not in instanceinfo
-      self._ErrorIf(test, constants.CV_ECLUSTERCFG, None,
-                    "ghost instance '%s' in temporary DRBD map", inst_uuid)
-        # ghost instance should not be running, but otherwise we
-        # don't give double warnings (both ghost instance and
-        # unallocated minor in use)
-      if test:
-        node_drbd[minor] = (inst_uuid, False)
-      else:
-        instance = instanceinfo[inst_uuid]
-        node_drbd[minor] = (inst_uuid, instance.disks_active)
+    node_drbd = self._ComputeDrbdMinors(ninfo, instanceinfo, disks_info,
+                                        drbd_map, self._ErrorIf)
 
     # and now check them
     used_minors = nresult.get(constants.NV_DRBDLIST, [])
@@ -2974,11 +1253,16 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
       # we cannot check drbd status
       return
 
-    for minor, (inst_uuid, must_exist) in node_drbd.items():
+    for minor, (disk_uuid, inst_uuid, must_exist) in node_drbd.items():
       test = minor not in used_minors and must_exist
+      if inst_uuid is not None:
+        attached = "(attached in instance '%s')" % \
+          self.cfg.GetInstanceName(inst_uuid)
+      else:
+        attached = "(detached)"
       self._ErrorIf(test, constants.CV_ENODEDRBD, ninfo.name,
-                    "drbd minor %d of instance %s is not active", minor,
-                    self.cfg.GetInstanceName(inst_uuid))
+                    "drbd minor %d of disk %s %s is not active",
+                    minor, disk_uuid, attached)
     for minor in used_minors:
       test = minor not in node_drbd
       self._ErrorIf(test, constants.CV_ENODEDRBD, ninfo.name,
@@ -3294,20 +1578,19 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
     node_disks_dev_inst_only = {}
     diskless_instances = set()
     nodisk_instances = set()
-    diskless = constants.DT_DISKLESS
 
     for nuuid in node_uuids:
       node_inst_uuids = list(itertools.chain(node_image[nuuid].pinst,
                                              node_image[nuuid].sinst))
       diskless_instances.update(uuid for uuid in node_inst_uuids
-                                if instanceinfo[uuid].disk_template == diskless)
+                                if not instanceinfo[uuid].disks)
       disks = [(inst_uuid, disk)
                for inst_uuid in node_inst_uuids
                for disk in self.cfg.GetInstanceDisks(inst_uuid)]
 
       if not disks:
         nodisk_instances.update(uuid for uuid in node_inst_uuids
-                                if instanceinfo[uuid].disk_template != diskless)
+                                if instanceinfo[uuid].disks)
         # No need to collect data
         continue
 
@@ -3662,7 +1945,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
     # FIXME: The check for integrity of config.data should be moved to
     # WConfD, which is the only one who can otherwise ensure nobody
     # will modify the configuration during the check.
-    with self.cfg.GetConfigManager(shared=True):
+    with self.cfg.GetConfigManager(shared=True, forcelock=True):
       feedback_fn("* Gathering information about nodes (%s nodes)" %
                   len(self.my_node_uuids))
       # Force the configuration to be fully distributed before doing any tests
@@ -3793,8 +2076,8 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
       if nimg.vm_capable:
         self._UpdateVerifyNodeLVM(node_i, nresult, vg_name, nimg)
         if constants.DT_DRBD8 in cluster.enabled_disk_templates:
-          self._VerifyNodeDrbd(node_i, nresult, self.all_inst_info, drbd_helper,
-                               all_drbd_map)
+          self._VerifyNodeDrbd(node_i, nresult, self.all_inst_info,
+                               self.all_disks_info, drbd_helper, all_drbd_map)
 
         if (constants.DT_PLAIN in cluster.enabled_disk_templates) or \
             (constants.DT_DRBD8 in cluster.enabled_disk_templates):
@@ -3836,9 +2119,10 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
         feedback_fn("* Verifying instance %s" % instance.name)
       self._VerifyInstance(instance, node_image, instdisk[inst_uuid])
 
-      # If the instance is non-redundant we cannot survive losing its primary
-      # node, so we are not N+1 compliant.
-      if instance.disk_template not in constants.DTS_MIRRORED:
+      # If the instance is not fully redundant we cannot survive losing its
+      # primary node, so we are not N+1 compliant.
+      inst_disks = self.cfg.GetInstanceDisks(instance.uuid)
+      if not utils.AllDiskOfType(inst_disks, constants.DTS_MIRRORED):
         i_non_redundant.append(instance)
 
       if not cluster.FillBE(instance)[constants.BE_AUTO_BALANCE]:
@@ -3915,23 +2199,3 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
             lu_result = False
 
     return lu_result
-
-
-class LUClusterVerifyDisks(NoHooksLU):
-  """Verifies the cluster disks status.
-
-  """
-  REQ_BGL = False
-
-  def ExpandNames(self):
-    self.share_locks = ShareAll()
-    self.needed_locks = {
-      locking.LEVEL_NODEGROUP: locking.ALL_SET,
-      }
-
-  def Exec(self, feedback_fn):
-    group_names = self.owned_locks(locking.LEVEL_NODEGROUP)
-
-    # Submit one instance of L{opcodes.OpGroupVerifyDisks} per node group
-    return ResultWithJobs([[opcodes.OpGroupVerifyDisks(group_name=group)]
-                           for group in group_names])
index ddc4551..fa2bf77 100644 (file)
@@ -549,8 +549,7 @@ def _ComputeMinMaxSpec(name, qualifier, ispecs, value):
 
 def ComputeIPolicySpecViolation(ipolicy, mem_size, cpu_count, disk_count,
                                 nic_count, disk_sizes, spindle_use,
-                                disk_template,
-                                _compute_fn=_ComputeMinMaxSpec):
+                                disk_types, _compute_fn=_ComputeMinMaxSpec):
   """Verifies ipolicy against provided specs.
 
   @type ipolicy: dict
@@ -567,13 +566,15 @@ def ComputeIPolicySpecViolation(ipolicy, mem_size, cpu_count, disk_count,
   @param disk_sizes: Sizes of the disks used (len must match C{disk_count})
   @type spindle_use: int
   @param spindle_use: The number of spindles this instance uses
-  @type disk_template: string
-  @param disk_template: The disk template of the instance
+  @type disk_types: list of strings
+  @param disk_types: The disk types of the instance
   @param _compute_fn: The compute function (unittest only)
   @return: A list of violations, or an empty list if no violations are found
 
   """
   assert disk_count == len(disk_sizes)
+  assert isinstance(disk_types, list)
+  assert disk_count == len(disk_types)
 
   test_settings = [
     (constants.ISPEC_MEM_SIZE, "", mem_size),
@@ -582,14 +583,20 @@ def ComputeIPolicySpecViolation(ipolicy, mem_size, cpu_count, disk_count,
     (constants.ISPEC_SPINDLE_USE, "", spindle_use),
     ] + [(constants.ISPEC_DISK_SIZE, str(idx), d)
          for idx, d in enumerate(disk_sizes)]
-  if disk_template != constants.DT_DISKLESS:
+
+  allowed_dts = set(ipolicy[constants.IPOLICY_DTS])
+  ret = []
+  if disk_count != 0:
     # This check doesn't make sense for diskless instances
     test_settings.append((constants.ISPEC_DISK_COUNT, "", disk_count))
-  ret = []
-  allowed_dts = ipolicy[constants.IPOLICY_DTS]
-  if disk_template not in allowed_dts:
+  elif constants.DT_DISKLESS not in allowed_dts:
+    ret.append("Disk template %s is not allowed (allowed templates %s)" %
+                (constants.DT_DISKLESS, utils.CommaJoin(allowed_dts)))
+
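+  # every disk type actually used by the instance must be allowed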
+  forbidden_dts = set(disk_types) - allowed_dts
+  if forbidden_dts:
     ret.append("Disk template %s is not allowed (allowed templates: %s)" %
-               (disk_template, utils.CommaJoin(allowed_dts)))
+               (utils.CommaJoin(forbidden_dts), utils.CommaJoin(allowed_dts)))
 
   min_errs = None
   for minmax in ipolicy[constants.ISPECS_MINMAX]:
@@ -602,8 +609,7 @@ def ComputeIPolicySpecViolation(ipolicy, mem_size, cpu_count, disk_count,
   return ret + min_errs
 
 
-def ComputeIPolicyDiskSizesViolation(ipolicy, disk_sizes,
-                                     disk_template,
+def ComputeIPolicyDiskSizesViolation(ipolicy, disk_sizes, disks,
                                      _compute_fn=_ComputeMinMaxSpec):
   """Verifies ipolicy against provided disk sizes.
 
@@ -614,18 +620,21 @@ def ComputeIPolicyDiskSizesViolation(ipolicy, disk_sizes,
   @param ipolicy: The ipolicy
   @type disk_sizes: list of ints
   @param disk_sizes: Sizes of the disks used (len must match C{disk_count})
-  @type disk_template: string
-  @param disk_template: The disk template of the instance
+  @type disks: list of L{Disk}
+  @param disks: The Disk objects of the instance
   @param _compute_fn: The compute function (unittest only)
   @return: A list of violations, or an empty list if no violations are found
 
   """
+  if len(disk_sizes) != len(disks):
+    return [constants.ISPEC_DISK_COUNT]
+  dev_types = [d.dev_type for d in disks]
   return ComputeIPolicySpecViolation(ipolicy,
                                      # mem_size, cpu_count, disk_count
                                      None, None, len(disk_sizes),
                                      None, disk_sizes, # nic_count, disk_sizes
                                      None, # spindle_use
-                                     disk_template,
+                                     dev_types,
                                      _compute_fn=_compute_fn)
 
 
@@ -665,10 +674,10 @@ def ComputeIPolicyInstanceViolation(ipolicy, instance, cfg,
   disk_count = len(disks)
   disk_sizes = [disk.size for disk in disks]
   nic_count = len(instance.nics)
-  disk_template = instance.disk_template
+  disk_types = [d.dev_type for d in disks]
 
   return ret + _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
-                           disk_sizes, spindle_use, disk_template)
+                           disk_sizes, spindle_use, disk_types)
 
 
 def _ComputeViolatingInstances(ipolicy, instances, cfg):
@@ -1294,20 +1303,21 @@ def CheckDiskAccessModeConsistency(parameters, cfg, group=None):
 
     for entry in inst_uuids:
       inst = cfg.GetInstanceInfo(entry)
-      inst_template = inst.disk_template
+      disks = cfg.GetInstanceDisks(entry)
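+      # an instance may have disks of several types, so each disk is checked
+      # individually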
+      for disk in disks:
 
-      if inst_template != disk_template:
-        continue
+        if disk.dev_type != disk_template:
+          continue
 
-      hv = inst.hypervisor
+        hv = inst.hypervisor
 
-      if not IsValidDiskAccessModeCombination(hv, inst_template, access):
-        raise errors.OpPrereqError("Instance {i}: cannot use '{a}' access"
-                                   " setting with {h} hypervisor and {d} disk"
-                                   " type.".format(i=inst.name,
-                                                   a=access,
-                                                   h=hv,
-                                                   d=inst_template))
+        if not IsValidDiskAccessModeCombination(hv, disk.dev_type, access):
+          raise errors.OpPrereqError("Instance {i}: cannot use '{a}' access"
+                                     " setting with {h} hypervisor and {d} disk"
+                                     " type.".format(i=inst.name,
+                                                     a=access,
+                                                     h=hv,
+                                                     d=disk.dev_type))
 
 
 def IsValidDiskAccessModeCombination(hv, disk_template, mode):
index 30983e0..91f8752 100644 (file)
@@ -295,7 +295,7 @@ class LUGroupAssignNodes(NoHooksLU):
     In particular, it returns information about newly split instances, and
     instances that were already split, and remain so after the change.
 
-    Only instances whose disk template is listed in constants.DTS_INT_MIRROR are
+    Only disks whose template is listed in constants.DTS_INT_MIRROR are
     considered.
 
     @type changes: list of (node_uuid, new_group_uuid) pairs.
@@ -315,7 +315,8 @@ class LUGroupAssignNodes(NoHooksLU):
     previously_split_instances = set()
 
     for inst in instance_data.values():
-      if inst.disk_template not in constants.DTS_INT_MIRROR:
+      inst_disks = self.cfg.GetInstanceDisks(inst.uuid)
+      if not utils.AnyDiskOfType(inst_disks, constants.DTS_INT_MIRROR):
         continue
 
       inst_nodes = self.cfg.GetInstanceNodes(inst.uuid)
@@ -846,12 +847,6 @@ class LUGroupVerifyDisks(NoHooksLU):
       locking.LEVEL_INSTANCE: [],
       locking.LEVEL_NODEGROUP: [],
       locking.LEVEL_NODE: [],
-
-      # This opcode acquires all node locks in a group. LUClusterVerifyDisks
-      # starts one instance of this opcode for every group, which means all
-      # nodes will be locked for a short amount of time, so it's better to
-      # acquire the node allocation lock as well.
-      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
       }
     self.dont_collate_locks[locking.LEVEL_NODEGROUP] = True
     self.dont_collate_locks[locking.LEVEL_NODE] = True
@@ -943,7 +938,9 @@ class LUGroupVerifyDisks(NoHooksLU):
   def _VerifyDrbdStates(self, node_errors, offline_disk_instance_names):
     node_to_inst = {}
     for inst in self.instances.values():
-      if not inst.disks_active or inst.disk_template != constants.DT_DRBD8:
+      disks = self.cfg.GetInstanceDisks(inst.uuid)
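+      # only instances whose disks should be active and which have at least
+      # one DRBD disk are relevant here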
+      if not (inst.disks_active and
+              utils.AnyDiskOfType(disks, [constants.DT_DRBD8])):
         continue
 
       secondary_nodes = self.cfg.GetInstanceSecondaryNodes(inst.uuid)
index 7ce4dd9..a49e33c 100644 (file)
 
 """Logical units dealing with instances."""
 
-import OpenSSL
-import copy
 import logging
 import os
 
 from ganeti import compat
 from ganeti import constants
 from ganeti import errors
-from ganeti import ht
-from ganeti import hypervisor
 from ganeti import locking
 from ganeti.masterd import iallocator
 from ganeti import masterd
 from ganeti import netutils
 from ganeti import objects
-from ganeti import pathutils
-from ganeti import serializer
-import ganeti.rpc.node as rpc
 from ganeti import utils
-from ganeti.utils import retry
 
 from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs
 
-from ganeti.cmdlib.common import INSTANCE_DOWN, \
+from ganeti.cmdlib.common import \
   INSTANCE_NOT_RUNNING, CheckNodeOnline, \
   ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
-  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
-  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, CheckOSImage, \
-  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceUuidAndName, \
-  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeUuidAndName, \
-  CheckDiskTemplateEnabled, IsValidDiskAccessModeCombination, \
-  DetermineImageSize, IsInstanceRunning
+  LoadNodeEvacResult, \
+  ExpandInstanceUuidAndName, \
+  CheckInstanceState, ExpandNodeUuidAndName, \
+  CheckDiskTemplateEnabled
 from ganeti.cmdlib.instance_storage import CreateDisks, \
-  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, ImageDisks, \
-  WaitForSync, IsExclusiveStorageEnabledNodeUuid, CreateSingleBlockDev, \
-  ComputeDisks, ComputeDisksInfo, CheckRADOSFreeSpace, ComputeDiskSizePerVG, \
-  GenerateDiskTemplate, StartInstanceDisks, ShutdownInstanceDisks, \
-  AssembleInstanceDisks, CheckSpindlesExclusiveStorage, TemporaryDisk, \
-  CalculateFileStorageDir
-from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
-  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
-  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
-  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
+  ComputeDisks, \
+  StartInstanceDisks, ShutdownInstanceDisks, \
+  AssembleInstanceDisks
+from ganeti.cmdlib.instance_utils import \
+  BuildInstanceHookEnvByObject, \
+  CheckNodeNotDrained, RemoveInstance, CopyLockList, \
+  CheckNodeVmCapable, CheckTargetNodeIPolicy, \
   GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
-  CheckInstanceBridgesExist, CheckNicsBridgesExist, UpdateMetadata, \
-  CheckCompressionTool, CheckInstanceExistence
+  CheckInstanceBridgesExist, \
+  CheckInstanceExistence, \
+  CheckHostnameSane, CheckOpportunisticLocking, ComputeFullBeParams, \
+  ComputeNics, CreateInstanceAllocRequest
 import ganeti.masterd.instance
 
 
-#: Type description for changes as returned by L{_ApplyContainerMods}'s
-#: callbacks
-_TApplyContModsCbChanges = \
-  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
-    ht.TNonEmptyString,
-    ht.TAny,
-    ])))
-
-
-def _CheckHostnameSane(lu, name):
-  """Ensures that a given hostname resolves to a 'sane' name.
-
-  The given name is required to be a prefix of the resolved hostname,
-  to prevent accidental mismatches.
-
-  @param lu: the logical unit on behalf of which we're checking
-  @param name: the name we should resolve and check
-  @return: the resolved hostname object
-
-  """
-  hostname = netutils.GetHostname(name=name)
-  if hostname.name != name:
-    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
-  if not utils.MatchNameComponent(name, [hostname.name]):
-    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
-                                " same as given hostname '%s'") %
-                               (hostname.name, name), errors.ECODE_INVAL)
-  return hostname
-
-
-def _CheckOpportunisticLocking(op):
-  """Generate error if opportunistic locking is not possible.
-
-  """
-  if op.opportunistic_locking and not op.iallocator:
-    raise errors.OpPrereqError("Opportunistic locking is only available in"
-                               " combination with an instance allocator",
-                               errors.ECODE_INVAL)
-
-
-def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_name_whitelist):
-  """Wrapper around IAReqInstanceAlloc.
-
-  @param op: The instance opcode
-  @param disks: The computed disks
-  @param nics: The computed nics
-  @param beparams: The full filled beparams
-  @param node_name_whitelist: List of nodes which should appear as online to the
-    allocator (unless the node is already marked offline)
-
-  @returns: A filled L{iallocator.IAReqInstanceAlloc}
-
-  """
-  spindle_use = beparams[constants.BE_SPINDLE_USE]
-  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
-                                       disk_template=op.disk_template,
-                                       group_name=op.group_name,
-                                       tags=op.tags,
-                                       os=op.os_type,
-                                       vcpus=beparams[constants.BE_VCPUS],
-                                       memory=beparams[constants.BE_MAXMEM],
-                                       spindle_use=spindle_use,
-                                       disks=disks,
-                                       nics=[n.ToDict() for n in nics],
-                                       hypervisor=op.hypervisor,
-                                       node_whitelist=node_name_whitelist)
-
-
-def _ComputeFullBeParams(op, cluster):
-  """Computes the full beparams.
-
-  @param op: The instance opcode
-  @param cluster: The cluster config object
-
-  @return: The fully filled beparams
-
-  """
-  default_beparams = cluster.beparams[constants.PP_DEFAULT]
-  for param, value in op.beparams.iteritems():
-    if value == constants.VALUE_AUTO:
-      op.beparams[param] = default_beparams[param]
-  objects.UpgradeBeParams(op.beparams)
-  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
-  return cluster.SimpleFillBE(op.beparams)
-
-
-def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
-  """Computes the nics.
-
-  @param op: The instance opcode
-  @param cluster: Cluster configuration object
-  @param default_ip: The default ip to assign
-  @param cfg: An instance of the configuration object
-  @param ec_id: Execution context ID
-
-  @returns: The built NICs
-
-  """
-  nics = []
-  for nic in op.nics:
-    nic_mode_req = nic.get(constants.INIC_MODE, None)
-    nic_mode = nic_mode_req
-    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
-      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
-
-    net = nic.get(constants.INIC_NETWORK, None)
-    link = nic.get(constants.NIC_LINK, None)
-    ip = nic.get(constants.INIC_IP, None)
-    vlan = nic.get(constants.INIC_VLAN, None)
-
-    if net is None or net.lower() == constants.VALUE_NONE:
-      net = None
-    else:
-      if nic_mode_req is not None or link is not None:
-        raise errors.OpPrereqError("If network is given, no mode or link"
-                                   " is allowed to be passed",
-                                   errors.ECODE_INVAL)
-
-    # ip validity checks
-    if ip is None or ip.lower() == constants.VALUE_NONE:
-      nic_ip = None
-    elif ip.lower() == constants.VALUE_AUTO:
-      if not op.name_check:
-        raise errors.OpPrereqError("IP address set to auto but name checks"
-                                   " have been skipped",
-                                   errors.ECODE_INVAL)
-      nic_ip = default_ip
-    else:
-      # We defer pool operations until later, so that the iallocator has
-      # filled in the instance's node(s)
-      if ip.lower() == constants.NIC_IP_POOL:
-        if net is None:
-          raise errors.OpPrereqError("if ip=pool, parameter network"
-                                     " must be passed too",
-                                     errors.ECODE_INVAL)
-
-      elif not netutils.IPAddress.IsValid(ip):
-        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
-                                   errors.ECODE_INVAL)
-
-      nic_ip = ip
-
-    # TODO: check the ip address for uniqueness
-    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip and not net:
-      raise errors.OpPrereqError("Routed nic mode requires an ip address"
-                                 " if not attached to a network",
-                                 errors.ECODE_INVAL)
-
-    # MAC address verification
-    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
-    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
-      mac = utils.NormalizeAndValidateMac(mac)
-
-      try:
-        # TODO: We need to factor this out
-        cfg.ReserveMAC(mac, ec_id)
-      except errors.ReservationError:
-        raise errors.OpPrereqError("MAC address %s already in use"
-                                   " in cluster" % mac,
-                                   errors.ECODE_NOTUNIQUE)
-
-    #  Build nic parameters
-    nicparams = {}
-    if nic_mode_req:
-      nicparams[constants.NIC_MODE] = nic_mode
-    if link:
-      nicparams[constants.NIC_LINK] = link
-    if vlan:
-      nicparams[constants.NIC_VLAN] = vlan
-
-    check_params = cluster.SimpleFillNIC(nicparams)
-    objects.NIC.CheckParameterSyntax(check_params)
-    net_uuid = cfg.LookupNetwork(net)
-    name = nic.get(constants.INIC_NAME, None)
-    if name is not None and name.lower() == constants.VALUE_NONE:
-      name = None
-    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
-                          network=net_uuid, nicparams=nicparams)
-    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
-    nics.append(nic_obj)
-
-  return nics
-
-
-def _CheckForConflictingIp(lu, ip, node_uuid):
-  """In case of conflicting IP address raise error.
-
-  @type ip: string
-  @param ip: IP address
-  @type node_uuid: string
-  @param node_uuid: node UUID
-
-  """
-  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node_uuid)
-  if conf_net is not None:
-    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
-                                " network %s, but the target NIC does not." %
-                                (ip, conf_net)),
-                               errors.ECODE_STATE)
-
-  return (None, None)
-
-
-def _ComputeIPolicyInstanceSpecViolation(
-  ipolicy, instance_spec, disk_template,
-  _compute_fn=ComputeIPolicySpecViolation):
-  """Compute if instance specs meets the specs of ipolicy.
-
-  @type ipolicy: dict
-  @param ipolicy: The ipolicy to verify against
-  @param instance_spec: dict
-  @param instance_spec: The instance spec to verify
-  @type disk_template: string
-  @param disk_template: the disk template of the instance
-  @param _compute_fn: The function to verify ipolicy (unittest only)
-  @see: L{ComputeIPolicySpecViolation}
-
-  """
-  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
-  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
-  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
-  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
-  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
-  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)
-
-  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
-                     disk_sizes, spindle_use, disk_template)
-
-
-def _ComputeInstanceCommunicationNIC(instance_name):
-  """Compute the name of the instance NIC used by instance
-  communication.
-
-  With instance communication, a new NIC is added to the instance.
-  This NIC has a special name that identifies it as being part of
-  instance communication, and not just a normal NIC.  This function
-  generates the name of the NIC based on a prefix and the instance
-  name
-
-  @type instance_name: string
-  @param instance_name: name of the instance the NIC belongs to
-
-  @rtype: string
-  @return: name of the NIC
-
-  """
-  return constants.INSTANCE_COMMUNICATION_NIC_PREFIX + instance_name
-
-
-class LUInstanceCreate(LogicalUnit):
-  """Create an instance.
-
-  """
-  HPATH = "instance-add"
-  HTYPE = constants.HTYPE_INSTANCE
-  REQ_BGL = False
-
-  def _CheckDiskTemplateValid(self):
-    """Checks validity of disk template.
-
-    """
-    cluster = self.cfg.GetClusterInfo()
-    if self.op.disk_template is None:
-      # FIXME: It would be better to take the default disk template from the
-      # ipolicy, but for the ipolicy we need the primary node, which we get from
-      # the iallocator, which wants the disk template as input. To solve this
-      # chicken-and-egg problem, it should be possible to specify just a node
-      # group from the iallocator and take the ipolicy from that.
-      self.op.disk_template = cluster.enabled_disk_templates[0]
-    CheckDiskTemplateEnabled(cluster, self.op.disk_template)
-
-  def _CheckDiskArguments(self):
-    """Checks validity of disk-related arguments.
-
-    """
-    # check that disk's names are unique and valid
-    utils.ValidateDeviceNames("disk", self.op.disks)
-
-    self._CheckDiskTemplateValid()
-
-    # check disks. parameter names and consistent adopt/no-adopt strategy
-    has_adopt = has_no_adopt = False
-    for disk in self.op.disks:
-      if self.op.disk_template != constants.DT_EXT:
-        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
-      if constants.IDISK_ADOPT in disk:
-        has_adopt = True
-      else:
-        has_no_adopt = True
-    if has_adopt and has_no_adopt:
-      raise errors.OpPrereqError("Either all disks are adopted or none is",
-                                 errors.ECODE_INVAL)
-    if has_adopt:
-      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
-        raise errors.OpPrereqError("Disk adoption is not supported for the"
-                                   " '%s' disk template" %
-                                   self.op.disk_template,
-                                   errors.ECODE_INVAL)
-      if self.op.iallocator is not None:
-        raise errors.OpPrereqError("Disk adoption not allowed with an"
-                                   " iallocator script", errors.ECODE_INVAL)
-      if self.op.mode == constants.INSTANCE_IMPORT:
-        raise errors.OpPrereqError("Disk adoption not allowed for"
-                                   " instance import", errors.ECODE_INVAL)
-    else:
-      if self.op.disk_template in constants.DTS_MUST_ADOPT:
-        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
-                                   " but no 'adopt' parameter given" %
-                                   self.op.disk_template,
-                                   errors.ECODE_INVAL)
-
-    self.adopt_disks = has_adopt
-
-  def _CheckVLANArguments(self):
-    """ Check validity of VLANs if given
-
-    """
-    for nic in self.op.nics:
-      vlan = nic.get(constants.INIC_VLAN, None)
-      if vlan:
-        if vlan[0] == ".":
-          # vlan starting with dot means single untagged vlan,
-          # might be followed by trunk (:)
-          if not vlan[1:].isdigit():
-            vlanlist = vlan[1:].split(':')
-            for vl in vlanlist:
-              if not vl.isdigit():
-                raise errors.OpPrereqError("Specified VLAN parameter is "
-                                           "invalid : %s" % vlan,
-                                             errors.ECODE_INVAL)
-        elif vlan[0] == ":":
-          # Trunk - tagged only
-          vlanlist = vlan[1:].split(':')
-          for vl in vlanlist:
-            if not vl.isdigit():
-              raise errors.OpPrereqError("Specified VLAN parameter is invalid"
-                                           " : %s" % vlan, errors.ECODE_INVAL)
-        elif vlan.isdigit():
-          # This is the simplest case. No dots, only single digit
-          # -> Create untagged access port, dot needs to be added
-          nic[constants.INIC_VLAN] = "." + vlan
-        else:
-          raise errors.OpPrereqError("Specified VLAN parameter is invalid"
-                                       " : %s" % vlan, errors.ECODE_INVAL)
-
-  def CheckArguments(self):
-    """Check arguments.
-
-    """
-    # do not require name_check to ease forward/backward compatibility
-    # for tools
-    if self.op.no_install and self.op.start:
-      self.LogInfo("No-installation mode selected, disabling startup")
-      self.op.start = False
-    # validate/normalize the instance name
-    self.op.instance_name = \
-      netutils.Hostname.GetNormalizedName(self.op.instance_name)
-
-    if self.op.ip_check and not self.op.name_check:
-      # TODO: make the ip check more flexible and not depend on the name check
-      raise errors.OpPrereqError("Cannot do IP address check without a name"
-                                 " check", errors.ECODE_INVAL)
-
-    # instance name verification
-    if self.op.name_check:
-      self.hostname = _CheckHostnameSane(self, self.op.instance_name)
-      self.op.instance_name = self.hostname.name
-      # used in CheckPrereq for ip ping check
-      self.check_ip = self.hostname.ip
-    else:
-      self.check_ip = None
-
-    # add NIC for instance communication
-    if self.op.instance_communication:
-      nic_name = _ComputeInstanceCommunicationNIC(self.op.instance_name)
-
-      for nic in self.op.nics:
-        if nic.get(constants.INIC_NAME, None) == nic_name:
-          break
-      else:
-        self.op.nics.append({constants.INIC_NAME: nic_name,
-                             constants.INIC_MAC: constants.VALUE_GENERATE,
-                             constants.INIC_IP: constants.NIC_IP_POOL,
-                             constants.INIC_NETWORK:
-                               self.cfg.GetInstanceCommunicationNetwork()})
-
-    # timeouts for unsafe OS installs
-    if self.op.helper_startup_timeout is None:
-      self.op.helper_startup_timeout = constants.HELPER_VM_STARTUP
-
-    if self.op.helper_shutdown_timeout is None:
-      self.op.helper_shutdown_timeout = constants.HELPER_VM_SHUTDOWN
-
-    # check nics' parameter names
-    for nic in self.op.nics:
-      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
-    # check that NIC's parameters names are unique and valid
-    utils.ValidateDeviceNames("NIC", self.op.nics)
-
-    self._CheckVLANArguments()
-
-    self._CheckDiskArguments()
-    assert self.op.disk_template is not None
-
-    # file storage checks
-    if (self.op.file_driver and
-        self.op.file_driver not in constants.FILE_DRIVER):
-      raise errors.OpPrereqError("Invalid file driver name '%s'" %
-                                 self.op.file_driver, errors.ECODE_INVAL)
-
-    # set default file_driver if unset and required
-    if (not self.op.file_driver and
-        self.op.disk_template in constants.DTS_FILEBASED):
-      self.op.file_driver = constants.FD_DEFAULT
-
-    ### Node/iallocator related checks
-    CheckIAllocatorOrNode(self, "iallocator", "pnode")
-
-    if self.op.pnode is not None:
-      if self.op.disk_template in constants.DTS_INT_MIRROR:
-        if self.op.snode is None:
-          raise errors.OpPrereqError("The networked disk templates need"
-                                     " a mirror node", errors.ECODE_INVAL)
-      elif self.op.snode:
-        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
-                        " template")
-        self.op.snode = None
-
-    _CheckOpportunisticLocking(self.op)
-
-    if self.op.mode == constants.INSTANCE_IMPORT:
-      # On import, force_variant must be True: if the OS variant was
-      # forced at the initial install, accepting it again is the only
-      # way the import can work
-      self.op.force_variant = True
-
-      if self.op.no_install:
-        self.LogInfo("No-installation mode has no effect during import")
-
-      if objects.GetOSImage(self.op.osparams):
-        self.LogInfo("OS image has no effect during import")
-    elif self.op.mode == constants.INSTANCE_CREATE:
-      os_image = CheckOSImage(self.op)
-
-      if self.op.os_type is None and os_image is None:
-        raise errors.OpPrereqError("No guest OS or OS image specified",
-                                   errors.ECODE_INVAL)
-
-      if (self.op.os_type is not None and
-          self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os):
-        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
-                                   " installation" % self.op.os_type,
-                                   errors.ECODE_STATE)
-    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
-      if objects.GetOSImage(self.op.osparams):
-        self.LogInfo("OS image has no effect during import")
-
-      self._cds = GetClusterDomainSecret()
-
-      # Check handshake to ensure both clusters have the same domain secret
-      src_handshake = self.op.source_handshake
-      if not src_handshake:
-        raise errors.OpPrereqError("Missing source handshake",
-                                   errors.ECODE_INVAL)
-
-      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
-                                                           src_handshake)
-      if errmsg:
-        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
-                                   errors.ECODE_INVAL)
-
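
# Editor's note: conceptually, the handshake proves both clusters hold the
# same cluster domain secret via an HMAC exchange. The sketch below shows
# the general shape with the standard library only; it is NOT Ganeti's
# actual wire format (CheckRemoteExportHandshake implements that):

import hashlib
import hmac

def _make_handshake(secret, message):
  return (message, hmac.new(secret, message, hashlib.sha1).hexdigest())

def _check_handshake(secret, handshake):
  message, digest = handshake
  expected = hmac.new(secret, message, hashlib.sha1).hexdigest()
  return hmac.compare_digest(digest, expected)

# a digest mismatch means the two sides do not share the same secret
assert _check_handshake(b"shared-secret",
                        _make_handshake(b"shared-secret", b"hello"))
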
-      # Load and check source CA
-      self.source_x509_ca_pem = self.op.source_x509_ca
-      if not self.source_x509_ca_pem:
-        raise errors.OpPrereqError("Missing source X509 CA",
-                                   errors.ECODE_INVAL)
-
-      try:
-        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
-                                                    self._cds)
-      except OpenSSL.crypto.Error, err:
-        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
-                                   (err, ), errors.ECODE_INVAL)
-
-      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
-      if errcode is not None:
-        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
-                                   errors.ECODE_INVAL)
-
-      self.source_x509_ca = cert
-
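
# Editor's note: a hedged sketch of the plain-certificate half of this step
# using pyOpenSSL directly; Ganeti's LoadSignedX509Certificate additionally
# verifies an HMAC signature over the PEM, made with the cluster domain
# secret, which is omitted here:

import OpenSSL

def _load_and_check_ca(pem_data):
  cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
                                         pem_data)
  if cert.has_expired():
    raise ValueError("source X509 CA has expired")
  return cert
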
-      src_instance_name = self.op.source_instance_name
-      if not src_instance_name:
-        raise errors.OpPrereqError("Missing source instance name",
-                                   errors.ECODE_INVAL)
-
-      self.source_instance_name = \
-        netutils.GetHostname(name=src_instance_name).name
-
-    else:
-      raise errors.OpPrereqError("Invalid instance creation mode %r" %
-                                 self.op.mode, errors.ECODE_INVAL)
-
-  def ExpandNames(self):
-    """ExpandNames for CreateInstance.
-
-    Figure out the right locks for instance creation.
-
-    """
-    self.needed_locks = {}
-
-    # this is just a preventive check; an instance with this name may
-    # still be added in the meantime, in which case creation will fail
-    # at lock-add time
-    CheckInstanceExistence(self, self.op.instance_name)
-
-    self.add_locks[locking.LEVEL_INSTANCE] = self.op.instance_name
-
-    if self.op.iallocator: