Merge branch 'stable-2.11' into stable-2.12
author Petr Pudlak <pudlak@google.com>
Fri, 13 Jun 2014 11:34:48 +0000 (13:34 +0200)
committer Petr Pudlak <pudlak@google.com>
Fri, 13 Jun 2014 13:07:59 +0000 (15:07 +0200)
* stable-2.11
  Bump revision to 2.11.2
  Prepare NEWS file for 2.11.2 release
  Document '--user-shutdown' cluster parameter
  Consider 'Cluster.enabled_user_shutdown' in instance queries
  Extend QA with cluster wide user shutdown tests
  Modify how the KVM daemon is started/stopped
  RPC to start/stop a daemon on a node
  KVM daemon decides if it should run
  Add hypervisor list, user shutdown, and vm capable to Ssconf
  Add 'enabled_user_shutdown' cluster field
  Improve Python conditionals 'is None' and 'is not None'
  Reuse existing helper function to stop a daemon
  Remove unused local variable
  Fix docstring and relax type constraints
  Fix docstrings for hvparams
  Fix docstring
  Improve python dict check
  Fix instance user shutdown QA

* stable-2.10
  Remove 'physical_id' from testing data
  Support disk hotplug with userspace access
  Check for SSL encoding inconsistencies

Conflicts:
NEWS
configure.ac
lib/bootstrap.py
lib/client/gnt_cluster.py
lib/cmdlib/cluster.py
lib/cmdlib/common.py
lib/objects.py
man/gnt-cluster.rst
qa/ganeti-qa.py
src/Ganeti/Objects.hs
src/Ganeti/OpCodes.hs
src/Ganeti/Query/Server.hs
src/Ganeti/Ssconf.hs
test/data/instance-prim-sec.txt
test/hs/Test/Ganeti/OpCodes.hs
test/hs/Test/Ganeti/Ssconf.hs
test/py/cfgupgrade_unittest.py
test/py/daemon-util_unittest.bash
tools/cfgupgrade
Resolutions:
  - Merge newly added configuration options in 2.11 and 2.12
  - Include KVMD changes from 2.11
  - Update the testing 2.11 configuration for up/downgrades

Signed-off-by: Petr Pudlak <pudlak@google.com>
Reviewed-by: Klaus Aehlig <aehlig@google.com>

44 files changed:
NEWS
daemons/daemon-util.in
lib/backend.py
lib/bootstrap.py
lib/cli.py
lib/client/gnt_cluster.py
lib/cmdlib/cluster.py
lib/cmdlib/common.py
lib/cmdlib/instance.py
lib/cmdlib/instance_operation.py
lib/cmdlib/instance_query.py
lib/cmdlib/instance_storage.py
lib/cmdlib/node.py
lib/config.py
lib/hypervisor/hv_kvm/__init__.py
lib/objects.py
lib/query.py
lib/rpc_defs.py
lib/server/noded.py
lib/ssconf.py
lib/utils/__init__.py
lib/watcher/__init__.py
man/gnt-cluster.rst
man/gnt-instance.rst
qa/ganeti-qa.py
qa/qa_cluster.py
qa/qa_instance.py
src/Ganeti/Constants.hs
src/Ganeti/Objects.hs
src/Ganeti/OpCodes.hs
src/Ganeti/OpParams.hs
src/Ganeti/Query/Server.hs
src/Ganeti/Ssconf.hs
src/Ganeti/Types.hs
src/Ganeti/Utils.hs
test/data/cluster_config_2.11.json
test/hs/Test/Ganeti/OpCodes.hs
test/hs/Test/Ganeti/Ssconf.hs
test/py/cfgupgrade_unittest.py
test/py/cmdlib/backup_unittest.py
test/py/cmdlib/instance_unittest.py
test/py/daemon-util_unittest.bash
test/py/ganeti.query_unittest.py
tools/cfgupgrade

diff --cc NEWS
--- 1/NEWS
--- 2/NEWS
+++ b/NEWS
@@@ -2,43 -2,33 +2,70 @@@ New
  ====
  
  
 +Version 2.12.0 alpha1
 +---------------------
 +
 +*(unreleased)*
 +
 +Incompatible/important changes
 +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 +
 +- Do not use debug mode in production. Certain daemons will issue warnings
 +  when launched in debug mode. Some debug logging violates some of the new
 +  invariants in the system (see "New features"). The logging has been kept as
 +  it aids diagnostics and development.
 +
 +New features
 +~~~~~~~~~~~~
 +
 +- OS install script parameters now come in public, private and secret
 +  varieties:
 +
 +  - Public parameters are like all other parameters in Ganeti.
 +  - Ganeti will not log private and secret parameters, *unless* it is running
 +    in debug mode.
 +  - Ganeti will not save secret parameters to configuration. Secret parameters
 +    must be supplied every time you install, or reinstall, an instance.
 +  - Attempting to override public parameters with private or secret parameters
 +    results in an error. Similarly, you may not use secret parameters to
 +    override private parameters.
 +
 +- The move-instance tool can now attempt to allocate an instance by using
 +  opportunistic locking when an iallocator is used.
 +- The build system creates sample systemd unit files, available under
 +  doc/examples/systemd. These unit files allow systemd to natively
 +  manage and supervise all Ganeti processes.
 +- Different types of compression can be applied during instance moves, including
 +  user-specified ones.
 +
 +
+ Version 2.11.2
+ --------------
+
+ *(Released Fri, 13 Jun 2014)*
+
+ - Improvements to KVM with respect to the kvmd and instance shutdown
+   behavior.
+   WARNING: In contrast to our standard policy, this bug fix update
+   introduces new parameters to the configuration. This means in
+   particular that after an upgrade from 2.11.0 or 2.11.1, 'cfgupgrade'
+   needs to be run, either manually or by running
+   'gnt-cluster upgrade --to 2.11.2' (which requires that the cluster
+   was configured with --enable-full-version).
+   This also means that it is not easily possible to downgrade from
+   2.11.2 to 2.11.1 or 2.11.0; the only way is to go down to 2.10 and
+   back up again.
+
+ Inherited from the 2.10 branch:
+
+ - Check for SSL encoding inconsistencies
+ - Check drbd helper only in VM capable nodes
+ - Improvements in statistics utils
+
+ Inherited from the 2.9 branch:
+
+ - check-man-warnings: use C.UTF-8 and set LC_ALL
+
  Version 2.11.1
  --------------
  
diff --cc daemons/daemon-util.in
@@@ -29,18 -29,12 +29,19 @@@ readonly defaults_file="$SYSCONFDIR/def
  # they're stopped in reverse order.
  DAEMONS=(
    ganeti-noded
 -  ganeti-masterd
 +  ganeti-wconfd
    ganeti-rapi
    ganeti-luxid
+   ganeti-kvmd
    )
  
 +# This is the list of daemons that are loaded on demand; they should only be
 +# stopped, not started.
 +ON_DEMAND_DAEMONS=(
 +  ganeti-metad
 +  ganeti-kvmd
 +  )
 +
  _confd_enabled() {
    [[ "@CUSTOM_ENABLE_CONFD@" == True ]]
  }
diff --cc lib/backend.py
Simple merge
diff --cc lib/bootstrap.py
@@@ -553,7 -553,7 +553,8 @@@ def InitCluster(cluster_name, mac_prefi
                  primary_ip_version=None, ipolicy=None,
                  prealloc_wipe_disks=False, use_external_mip_script=False,
                  hv_state=None, disk_state=None, enabled_disk_templates=None,
-                 install_image=None, zeroing_image=None, compression_tools=None):
++                install_image=None, zeroing_image=None, compression_tools=None,
+                 enabled_user_shutdown=False):
    """Initialise the cluster.
  
    @type candidate_pool_size: int
      disk_state_static=disk_state,
      enabled_disk_templates=enabled_disk_templates,
      candidate_certs=candidate_certs,
 +    osparams={},
 +    osparams_private_cluster={},
 +    install_image=install_image,
 +    zeroing_image=zeroing_image,
-     compression_tools=compression_tools
++    compression_tools=compression_tools,
+     enabled_user_shutdown=enabled_user_shutdown,
      )
    master_node_config = objects.Node(name=hostname.name,
                                      primary_ip=hostname.ip,
diff --cc lib/cli.py
Simple merge
diff --cc lib/client/gnt_cluster.py
@@@ -265,19 -264,13 +265,25 @@@ def InitCluster(opts, args)
  
    hv_state = dict(opts.hv_state)
  
 +  if opts.install_image:
 +    install_image = opts.install_image
 +  else:
 +    install_image = ""
 +
 +  if opts.zeroing_image:
 +    zeroing_image = opts.zeroing_image
 +  else:
 +    zeroing_image = ""
 +
 +  compression_tools = _GetCompressionTools(opts)
 +
    default_ialloc_params = opts.default_iallocator_params
+   if opts.enabled_user_shutdown:
+     enabled_user_shutdown = True
+   else:
+     enabled_user_shutdown = False
    bootstrap.InitCluster(cluster_name=args[0],
                          secondary_ip=opts.secondary_ip,
                          vg_name=vg_name,
                          hv_state=hv_state,
                          disk_state=disk_state,
                          enabled_disk_templates=enabled_disk_templates,
 +                        install_image=install_image,
 +                        zeroing_image=zeroing_image,
-                         compression_tools=compression_tools
++                        compression_tools=compression_tools,
+                         enabled_user_shutdown=enabled_user_shutdown,
                          )
    op = opcodes.OpClusterPostInit()
    SubmitOpCode(op, opts=opts)
@@@ -561,11 -547,7 +568,12 @@@ def ShowClusterConfig(opts, args)
         utils.CommaJoin(pathutils.ES_SEARCH_PATH)),
        ("enabled disk templates",
         utils.CommaJoin(result["enabled_disk_templates"])),
 +      ("install image", result["install_image"]),
 +      ("instance communication network",
 +       result["instance_communication_network"]),
 +      ("zeroing image", result["zeroing_image"]),
 +      ("compression tools", result["compression_tools"]),
+       ("enabled user shutdown", result["enabled_user_shutdown"]),
        ]),
  
      ("Default node parameters",
@@@ -1154,11 -1123,8 +1162,13 @@@ def SetClusterParams(opts, args)
            opts.ipolicy_spindle_ratio is not None or
            opts.modify_etc_hosts is not None or
            opts.file_storage_dir is not None or
 +          opts.install_image is not None or
 +          opts.instance_communication_network is not None or
 +          opts.zeroing_image is not None or
 +          opts.shared_file_storage_dir is not None or
-           opts.compression_tools is not None):
++          opts.compression_tools is not None or
+           opts.shared_file_storage_dir is not None or
+           opts.enabled_user_shutdown is not None):
      ToStderr("Please give at least one of the parameters.")
      return 1
  
      enabled_disk_templates=enabled_disk_templates,
      force=opts.force,
      file_storage_dir=opts.file_storage_dir,
 +    install_image=opts.install_image,
 +    instance_communication_network=opts.instance_communication_network,
 +    zeroing_image=opts.zeroing_image,
      shared_file_storage_dir=opts.shared_file_storage_dir,
-     compression_tools=compression_tools
++    compression_tools=compression_tools,
+     enabled_user_shutdown=opts.enabled_user_shutdown,
      )
 -  SubmitOrSend(op, opts)
 -  return 0
 +  return base.GetResult(None, opts, SubmitOrSend(op, opts))
  
  
  def QueueOps(opts, args):
@@@ -2141,8 -2119,8 +2152,10 @@@ commands = 
       PRIMARY_IP_VERSION_OPT, PREALLOC_WIPE_DISKS_OPT, NODE_PARAMS_OPT,
       GLOBAL_SHARED_FILEDIR_OPT, USE_EXTERNAL_MIP_SCRIPT, DISK_PARAMS_OPT,
       HV_STATE_OPT, DISK_STATE_OPT, ENABLED_DISK_TEMPLATES_OPT,
 -     ENABLED_USER_SHUTDOWN_OPT, IPOLICY_STD_SPECS_OPT,
 -     GLOBAL_GLUSTER_FILEDIR_OPT]
 +     IPOLICY_STD_SPECS_OPT, GLOBAL_GLUSTER_FILEDIR_OPT, INSTALL_IMAGE_OPT,
-      ZEROING_IMAGE_OPT, COMPRESSION_TOOLS_OPT]
++     ZEROING_IMAGE_OPT, COMPRESSION_TOOLS_OPT,
++     ENABLED_USER_SHUTDOWN_OPT,
++     ]
       + INSTANCE_POLICY_OPTS + SPLIT_ISPECS_OPTS,
      "[opts...] <cluster_name>", "Initialises a new cluster configuration"),
    "destroy": (
    "modify": (
      SetClusterParams, ARGS_NONE,
      [FORCE_OPT,
 -     BACKEND_OPT, CP_SIZE_OPT, RQL_OPT,
 -     ENABLED_HV_OPT, HVLIST_OPT, MASTER_NETDEV_OPT,
 -     MASTER_NETMASK_OPT, NIC_PARAMS_OPT, VG_NAME_OPT, MAINTAIN_NODE_HEALTH_OPT,
 -     UIDPOOL_OPT, ADD_UIDS_OPT, REMOVE_UIDS_OPT, DRBD_HELPER_OPT,
 -     DEFAULT_IALLOCATOR_OPT, DEFAULT_IALLOCATOR_PARAMS_OPT, RESERVED_LVS_OPT,
 -     DRY_RUN_OPT, PRIORITY_OPT, PREALLOC_WIPE_DISKS_OPT, NODE_PARAMS_OPT,
 -     USE_EXTERNAL_MIP_SCRIPT, DISK_PARAMS_OPT, HV_STATE_OPT, DISK_STATE_OPT] +
 -     SUBMIT_OPTS +
 +     BACKEND_OPT, CP_SIZE_OPT, RQL_OPT, INSTALL_IMAGE_OPT,
 +     INSTANCE_COMMUNICATION_NETWORK_OPT, ENABLED_HV_OPT, HVLIST_OPT,
 +     MAC_PREFIX_OPT, MASTER_NETDEV_OPT, MASTER_NETMASK_OPT, NIC_PARAMS_OPT,
 +     VG_NAME_OPT, MAINTAIN_NODE_HEALTH_OPT, UIDPOOL_OPT, ADD_UIDS_OPT,
 +     REMOVE_UIDS_OPT, DRBD_HELPER_OPT, DEFAULT_IALLOCATOR_OPT,
 +     DEFAULT_IALLOCATOR_PARAMS_OPT, RESERVED_LVS_OPT, DRY_RUN_OPT, PRIORITY_OPT,
 +     PREALLOC_WIPE_DISKS_OPT, NODE_PARAMS_OPT, USE_EXTERNAL_MIP_SCRIPT,
 +     DISK_PARAMS_OPT, HV_STATE_OPT, DISK_STATE_OPT] + SUBMIT_OPTS +
-      [ENABLED_DISK_TEMPLATES_OPT, IPOLICY_STD_SPECS_OPT, MODIFY_ETCHOSTS_OPT] +
+      [ENABLED_DISK_TEMPLATES_OPT, IPOLICY_STD_SPECS_OPT, MODIFY_ETCHOSTS_OPT,
+       ENABLED_USER_SHUTDOWN_OPT] +
 -     INSTANCE_POLICY_OPTS + [GLOBAL_FILEDIR_OPT, GLOBAL_SHARED_FILEDIR_OPT],
 +     INSTANCE_POLICY_OPTS +
 +     [GLOBAL_FILEDIR_OPT, GLOBAL_SHARED_FILEDIR_OPT, ZEROING_IMAGE_OPT,
 +      COMPRESSION_TOOLS_OPT],
      "[opts...]",
      "Alters the parameters of the cluster"),
    "renew-crypto": (
diff --cc lib/cmdlib/cluster.py
@@@ -56,9 -56,7 +56,10 @@@ from ganeti.cmdlib.common import ShareA
    CheckOSParams, CheckHVParams, AdjustCandidatePool, CheckNodePVs, \
    ComputeIPolicyInstanceViolation, AnnotateDiskParams, SupportsOob, \
    CheckIpolicyVsDiskTemplates, CheckDiskAccessModeValidity, \
 +  CheckDiskAccessModeConsistency, CreateNewClientCert, \
 +  AddInstanceCommunicationNetworkOp, ConnectInstanceCommunicationNetworkOp, \
-   CheckImageValidity
++  CheckImageValidity, \
+   CheckDiskAccessModeConsistency, CreateNewClientCert, EnsureKvmdOnNodes
  
  import ganeti.masterd.instance
  
@@@ -415,9 -434,7 +416,10 @@@ class LUClusterQuery(NoHooksLU)
        "hidden_os": cluster.hidden_os,
        "blacklisted_os": cluster.blacklisted_os,
        "enabled_disk_templates": cluster.enabled_disk_templates,
 +      "install_image": cluster.install_image,
 +      "instance_communication_network": cluster.instance_communication_network,
 +      "compression_tools": cluster.compression_tools,
+       "enabled_user_shutdown": cluster.enabled_user_shutdown,
        }
  
      return result
@@@ -1531,19 -1326,14 +1530,21 @@@ class LUClusterSetParams(LogicalUnit)
      if self.op.enabled_disk_templates:
        self.cluster.enabled_disk_templates = \
          list(self.op.enabled_disk_templates)
 +    # save the changes
 +    self.cfg.Update(self.cluster, feedback_fn)
  
      self._SetVgName(feedback_fn)
 +
 +    self.cluster = self.cfg.GetClusterInfo()
      self._SetFileStorageDir(feedback_fn)
 -    self._SetSharedFileStorageDir(feedback_fn)
 +    self.cfg.Update(self.cluster, feedback_fn)
      self._SetDrbdHelper(feedback_fn)
  
 +    # re-read the fresh configuration again
 +    self.cluster = self.cfg.GetClusterInfo()
 +
+     ensure_kvmd = False
      if self.op.hvparams:
        self.cluster.hvparams = self.new_hvparams
      if self.op.os_hvp:
        result.Warn("Could not re-enable the master ip on the master,"
                    " please restart manually", self.LogWarning)
  
+     # Even though 'self.op.enabled_user_shutdown' is being tested
+     # above, the RPCs can only be done after 'self.cfg.Update' because
+     # this will update the cluster object and sync 'Ssconf', and kvmd
+     # uses 'Ssconf'.
+     if ensure_kvmd:
+       EnsureKvmdOnNodes(self, feedback_fn)
 +    if self.op.compression_tools is not None:
 +      self.cfg.SetCompressionTools(self.op.compression_tools)
 +
 +    network_name = self.op.instance_communication_network
 +    if network_name is not None:
 +      return self._ModifyInstanceCommunicationNetwork(self.cfg,
 +                                                      network_name, feedback_fn)
 +    else:
 +      return None
 +
  
  class LUClusterVerify(NoHooksLU):
    """Submits all jobs necessary to verify the cluster.
diff --cc lib/cmdlib/common.py
@@@ -1366,101 -1281,63 +1366,163 @@@ def CreateNewClientCert(lu, node_uuid, 
    return new_digest
  
  
 +def AddInstanceCommunicationNetworkOp(network):
 +  """Create an OpCode that adds the instance communication network.
 +
 +  This OpCode contains the configuration necessary for the instance
 +  communication network.
 +
 +  @type network: string
 +  @param network: name or UUID of the instance communication network
 +
 +  @rtype: L{ganeti.opcodes.OpCode}
 +  @return: OpCode that creates the instance communication network
 +
 +  """
 +  return opcodes.OpNetworkAdd(
 +    network_name=network,
 +    gateway=None,
 +    network=constants.INSTANCE_COMMUNICATION_NETWORK4,
 +    gateway6=None,
 +    network6=constants.INSTANCE_COMMUNICATION_NETWORK6,
 +    mac_prefix=constants.INSTANCE_COMMUNICATION_MAC_PREFIX,
 +    add_reserved_ips=None,
 +    conflicts_check=True,
 +    tags=[])
 +
 +
 +def ConnectInstanceCommunicationNetworkOp(group_uuid, network):
 +  """Create an OpCode that connects a group to the instance
 +  communication network.
 +
 +  This OpCode contains the configuration necessary for the instance
 +  communication network.
 +
 +  @type group_uuid: string
 +  @param group_uuid: UUID of the group to connect
 +
 +  @type network: string
 +  @param network: name or UUID of the network to connect to, i.e., the
 +                  instance communication network
 +
 +  @rtype: L{ganeti.opcodes.OpCode}
 +  @return: OpCode that connects the group to the instance
 +           communication network
 +
 +  """
 +  return opcodes.OpNetworkConnect(
 +    group_name=group_uuid,
 +    network_name=network,
 +    network_mode=constants.INSTANCE_COMMUNICATION_NETWORK_MODE,
 +    network_link=constants.INSTANCE_COMMUNICATION_NETWORK_LINK,
 +    conflicts_check=True)
 +
 +
 +def DetermineImageSize(lu, image, node_uuid):
 +  """Determines the size of the specified image.
 +
 +  @type image: string
 +  @param image: absolute filepath or URL of the image
 +
 +  @type node_uuid: string
 +  @param node_uuid: if L{image} is a filepath, this is the UUID of the
 +    node where the image is located
 +
 +  @rtype: int
 +  @return: size of the image in MB, rounded up
 +  @raise OpExecError: if the image does not exist
 +
 +  """
 +  # Check if we are dealing with a URL first
 +  class _HeadRequest(urllib2.Request):
 +    def get_method(self):
 +      return "HEAD"
 +
 +  if utils.IsUrl(image):
 +    try:
 +      response = urllib2.urlopen(_HeadRequest(image))
 +    except urllib2.URLError:
 +      raise errors.OpExecError("Could not retrieve image from given url '%s'" %
 +                               image)
 +
 +    content_length_str = response.info().getheader('content-length')
 +
 +    if not content_length_str:
 +      raise errors.OpExecError("Could not determine image size from given url"
 +                               " '%s'" % image)
 +
 +    byte_size = int(content_length_str)
 +  else:
 +    # We end up here if a file path is used
 +    result = lu.rpc.call_get_file_info(node_uuid, image)
 +    result.Raise("Could not determine size of file '%s'" % image)
 +
 +    success, attributes = result.payload
 +    if not success:
 +      raise errors.OpExecError("Could not open file '%s'" % image)
 +    byte_size = attributes[constants.STAT_SIZE]
 +
 +  # Finally, the conversion
 +  return math.ceil(byte_size / 1024. / 1024.)
++
++
+ def EnsureKvmdOnNodes(lu, feedback_fn, nodes=None):
+   """Ensure KVM daemon is running on nodes with KVM instances.
+   If user shutdown is enabled in the cluster:
+     - The KVM daemon will be started on VM capable nodes containing
+       KVM instances.
+     - The KVM daemon will be stopped on non VM capable nodes.
+   If user shutdown is disabled in the cluster:
+     - The KVM daemon will be stopped on all nodes
+   Issues a warning for each failed RPC call.
+   @type lu: L{LogicalUnit}
+   @param lu: logical unit on whose behalf we execute
+   @type feedback_fn: callable
+   @param feedback_fn: feedback function
+   @type nodes: list of string
+   @param nodes: if supplied, it overrides the node uuids to start/stop;
+                 this is used mainly for optimization
+   """
+   cluster = lu.cfg.GetClusterInfo()
+   # Either use the passed nodes or consider all cluster nodes
+   if nodes is not None:
+     node_uuids = set(nodes)
+   else:
+     node_uuids = lu.cfg.GetNodeList()
+   # Determine in which nodes should the KVM daemon be started/stopped
+   if constants.HT_KVM in cluster.enabled_hypervisors and \
+         cluster.enabled_user_shutdown:
+     start_nodes = []
+     stop_nodes = []
+     for node_uuid in node_uuids:
+       if lu.cfg.GetNodeInfo(node_uuid).vm_capable:
+         start_nodes.append(node_uuid)
+       else:
+         stop_nodes.append(node_uuid)
+   else:
+     start_nodes = []
+     stop_nodes = node_uuids
+   # Start KVM where necessary
+   if start_nodes:
+     results = lu.rpc.call_node_ensure_daemon(start_nodes, constants.KVMD, True)
+     for node_uuid in start_nodes:
+       results[node_uuid].Warn("Failed to start KVM daemon in node '%s'" %
+                               node_uuid, feedback_fn)
+   # Stop KVM where necessary
+   if stop_nodes:
+     results = lu.rpc.call_node_ensure_daemon(stop_nodes, constants.KVMD, False)
+     for node_uuid in stop_nodes:
+       results[node_uuid].Warn("Failed to stop KVM daemon in node '%s'" %
+                               node_uuid, feedback_fn)
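For illustration, the start/stop rule implemented by EnsureKvmdOnNodes above can be reduced to a few standalone lines of Python; the helper name and argument shapes below are assumptions made for this sketch, not part of the Ganeti API. As the comment in LUClusterSetParams notes, the corresponding RPCs are only issued after cfg.Update has synced Ssconf, since the KVM daemon reads its state from Ssconf::

    # Start ganeti-kvmd on VM-capable nodes when KVM and user shutdown are
    # both enabled for the cluster; stop it everywhere else.
    def partition_kvmd_nodes(enabled_hypervisors, user_shutdown, vm_capable):
        # vm_capable: dict mapping node uuid -> bool
        if "kvm" in enabled_hypervisors and user_shutdown:
            start = [n for n, cap in vm_capable.items() if cap]
            stop = [n for n, cap in vm_capable.items() if not cap]
        else:
            start, stop = [], list(vm_capable)
        return start, stop

    # Only node-a is VM capable, so only node-a should run ganeti-kvmd.
    print(partition_kvmd_nodes(["kvm"], True, {"node-a": True, "node-b": False}))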
diff --cc lib/cmdlib/instance.py
Simple merge
diff --cc lib/cmdlib/instance_operation.py
Simple merge
diff --cc lib/cmdlib/instance_query.py
Simple merge
diff --cc lib/cmdlib/instance_storage.py
@@@ -1400,11 -1301,10 +1400,11 @@@ def AssembleInstanceDisks(lu, instance 
          node_disk = node_disk.Copy()
          node_disk.UnsetSize()
        result = lu.rpc.call_blockdev_assemble(node_uuid, (node_disk, instance),
-                                              instance.name, False, idx)
+                                              instance, False, idx)
        msg = result.fail_msg
        if msg:
 -        is_offline_secondary = (node_uuid in instance.secondary_nodes and
 +        secondary_nodes = lu.cfg.GetInstanceSecondaryNodes(instance.uuid)
 +        is_offline_secondary = (node_uuid in secondary_nodes and
                                  result.offline)
          lu.LogWarning("Could not prepare block device %s on node %s"
                        " (is_primary=False, pass=1): %s",
diff --cc lib/cmdlib/node.py
@@@ -422,10 -423,18 +423,12 @@@ class LUNodeAdd(LogicalUnit)
      # We create a new certificate even if the node is readded
      digest = CreateNewClientCert(self, self.new_node.uuid)
      if self.new_node.master_candidate:
 -      utils.AddNodeToCandidateCerts(self.new_node.uuid, digest,
 -                                    cluster.candidate_certs)
 -      self.cfg.Update(cluster, feedback_fn)
 +      self.cfg.AddNodeToCandidateCerts(self.new_node.uuid, digest)
      else:
 -      if self.new_node.uuid in cluster.candidate_certs:
 -        utils.RemoveNodeFromCandidateCerts(self.new_node.uuid,
 -                                           cluster.candidate_certs)
 -        self.cfg.Update(cluster, feedback_fn)
 +      self.cfg.RemoveNodeFromCandidateCerts(self.new_node.uuid, warn_fn=None)
  
+     EnsureKvmdOnNodes(self, feedback_fn, nodes=[self.new_node.uuid])
  
  class LUNodeSetParams(LogicalUnit):
    """Modifies the parameters of a node.
diff --cc lib/config.py
@@@ -3004,8 -2626,9 +3006,9 @@@ class ConfigWriter(object)
      node_data = fn(node_names)
      node_pri_ips_data = fn(node_pri_ips)
      node_snd_ips_data = fn(node_snd_ips)
+     node_vm_capable_data = fn(node_vm_capable)
  
 -    cluster = self._config_data.cluster
 +    cluster = self._ConfigData().cluster
      cluster_tags = fn(cluster.GetTags())
  
      master_candidates_certs = fn("%s=%s" % (mc_uuid, mc_cert)
diff --cc lib/hypervisor/hv_kvm/__init__.py
@@@ -101,12 -112,31 +101,33 @@@ _RUNTIME_DEVICE = 
    }
  _RUNTIME_ENTRY = {
    constants.HOTPLUG_TARGET_NIC: lambda d, e: d,
-   constants.HOTPLUG_TARGET_DISK: lambda d, e: (d, e, None)
+   constants.HOTPLUG_TARGET_DISK: lambda d, e: (d, e[0], e[1])
    }
  
 +_MIGRATION_CAPS_DELIM = ":"
 +
  
+ def _GetDriveURI(disk, link, uri):
+   """Helper function to get the drive uri to be used in --drive kvm option
+   @type disk: L{objects.Disk}
+   @param disk: A disk configuration object
+   @type link: string
+   @param link: The device link as returned by _SymlinkBlockDev()
+   @type uri: string
+   @param uri: The drive uri as returned by _CalculateDeviceURI()
+   """
+   access_mode = disk.params.get(constants.LDP_ACCESS,
+                                 constants.DISK_KERNELSPACE)
+   if (uri and access_mode == constants.DISK_USERSPACE):
+     drive_uri = uri
+   else:
+     drive_uri = link
+   return drive_uri
  def _GenerateDeviceKVMId(dev_type, dev):
    """Helper function to generate a unique device name used by KVM
  
@@@ -937,15 -1353,10 +958,10 @@@ class KVMHypervisor(hv_base.BaseHypervi
          if needs_boot_flag and disk_type != constants.HT_DISK_IDE:
            boot_val = ",boot=on"
  
-       access_mode = cfdev.params.get(constants.LDP_ACCESS,
-                                      constants.DISK_KERNELSPACE)
-       if (uri and access_mode == constants.DISK_USERSPACE):
-         drive_uri = uri
-       else:
-         drive_uri = link_name
+       drive_uri = _GetDriveURI(cfdev, link_name, uri)
  
 -      drive_val = "file=%s,format=raw%s%s%s" % \
 -                  (drive_uri, if_val, boot_val, cache_val)
 +      drive_val = "file=%s,format=raw%s%s%s%s" % \
 +                  (drive_uri, if_val, boot_val, cache_val, aio_val)
  
        if device_driver:
          # kvm_disks are the 4th entry of runtime file that did not exist in
diff --cc lib/objects.py
@@@ -1601,10 -1592,7 +1601,11 @@@ class Cluster(TaggableObject)
      "enabled_disk_templates",
      "candidate_certs",
      "max_running_jobs",
 +    "install_image",
 +    "instance_communication_network",
 +    "zeroing_image",
 +    "compression_tools",
+     "enabled_user_shutdown",
      ] + _TIMESTAMPS + _UUID
  
    def UpgradeConfig(self):
      if self.max_running_jobs is None:
        self.max_running_jobs = constants.LUXID_MAXIMAL_RUNNING_JOBS_DEFAULT
  
 +    if self.instance_communication_network is None:
 +      self.instance_communication_network = ""
 +
 +    if self.install_image is None:
 +      self.install_image = ""
 +
 +    if self.compression_tools is None:
 +      self.compression_tools = constants.IEC_DEFAULT_TOOLS
 +
+     if self.enabled_user_shutdown is None:
+       self.enabled_user_shutdown = False
    @property
    def primary_hypervisor(self):
      """The first hypervisor is the primary.
diff --cc lib/query.py
Simple merge
diff --cc lib/rpc_defs.py
Simple merge
diff --cc lib/server/noded.py
Simple merge
diff --cc lib/ssconf.py
Simple merge
diff --cc lib/utils/__init__.py
Simple merge
diff --cc lib/watcher/__init__.py
Simple merge
diff --cc man/gnt-cluster.rst
@@@ -202,9 -202,7 +202,10 @@@ INI
  | [\--hypervisor-state *hvstate*]
  | [\--drbd-usermode-helper *helper*]
  | [\--enabled-disk-templates *template* [,*template*...]]
 +| [\--install-image *image*]
 +| [\--zeroing-image *image*]
 +| [\--compression-tools [*tool*, [*tool*]]]
+ | [\--user-shutdown {yes \| no}]
  | {*clustername*}
  
  This command is only run once initially on the first node of the
@@@ -693,10 -668,7 +701,11 @@@ MODIF
  | [\--drbd-usermode-helper *helper*]
  | [\--file-storage-dir *dir*]
  | [\--shared-file-storage-dir *dir*]
 +| [\--compression-tools [*tool*, [*tool*]]]
 +| [\--instance-communication-network *network*]
 +| [\--install-image *image*]
 +| [\--zeroing-image *image*]
+ | [\--user-shutdown {yes \| no}]
  
  
  Modify the options for the cluster.
@@@ -705,10 -677,11 +714,15 @@@ The ``--vg-name``, ``--enabled-hypervis
  ``-B (--backend-parameters)``, ``-D (--disk-parameters)``, ``--nic-parameters``,
  ``-C (--candidate-pool-size)``, ``--maintain-node-health``,
  ``--prealloc-wipe-disks``, ``--uid-pool``, ``--node-parameters``,
 +``--mac-prefix``, ``--master-netdev``, ``--master-netmask``,
 +``--use-external-mip-script``, ``--drbd-usermode-helper``,
 +``--file-storage-dir``, ``--shared-file-storage-dir``,
 +``--compression-tools``, and ``--enabled-disk-templates`` options are described in the **init** command.
+ ``--master-netdev``, ``--master-netmask``, ``--use-external-mip-script``,
+ ``--drbd-usermode-helper``, ``--file-storage-dir``,
+ ``--shared-file-storage-dir``, ``--enabled-disk-templates``, and
+ ``--user-shutdown`` options are
+ described in the **init** command.
  
  The ``--hypervisor-state`` and ``--disk-state`` options are described in
  detail in **ganeti**\(7).
diff --cc man/gnt-instance.rst
@@@ -842,29 -821,10 +842,29 @@@ vnet\_hd
  
      It is set to ``true`` by default.
  
 +virtio\_net\_queues
 +    Valid for the KVM hypervisor.
 +
 +    Set the number of queues (file descriptors) for the tap device to
 +    parallelize packet sending and receiving. Tap devices will be
 +    created with MULTI_QUEUE (IFF_MULTI_QUEUE) support. This only
 +    works with KVM paravirtual NICs (virtio-net) and the maximum
 +    number of queues is limited to ``8``. Technically this is an
 +    extension of ``vnet_hdr`` which must be enabled for multiqueue
 +    support.
 +
 +    If set to ``1`` queue, it effectively disables multiqueue support
 +    on the tap and virtio-net devices.
 +
 +    For instances it is necessary to manually set the number of queues (on
 +    Linux using: ``ethtool -L ethX combined $queues``).
 +
 +    It is set to ``1`` by default.
 +
  The ``-O (--os-parameters)`` option allows customisation of the OS
- parameters. The actual parameter names and values depends on the OS
- being used, but the syntax is the same key=value. For example, setting
- a hypothetical ``dhcp`` parameter to yes can be achieved by::
+ parameters. The actual parameter names and values depend on the OS being
+ used, but the syntax is the same key=value. For example, setting a
+ hypothetical ``dhcp`` parameter to yes can be achieved by::
  
      gnt-instance add -O dhcp=yes ...
  
diff --cc qa/ganeti-qa.py
@@@ -244,7 -244,7 +244,8 @@@ def RunClusterTests()
      ("cluster-modify", qa_cluster.TestClusterModifyDiskTemplates),
      ("cluster-modify", qa_cluster.TestClusterModifyFileStorageDir),
      ("cluster-modify", qa_cluster.TestClusterModifySharedFileStorageDir),
 +    ("cluster-modify", qa_cluster.TestClusterModifyInstallImage),
+     ("cluster-modify", qa_cluster.TestClusterModifyUserShutdown),
      ("cluster-rename", qa_cluster.TestClusterRename),
      ("cluster-info", qa_cluster.TestClusterVersion),
      ("cluster-info", qa_cluster.TestClusterInfo),
diff --cc qa/qa_cluster.py
Simple merge
diff --cc qa/qa_instance.py
@@@ -1290,8 -1239,9 +1290,9 @@@ def _TestInstanceUserDownXen(instance)
  def _TestInstanceUserDownKvm(instance):
    def _StopKVMInstance():
      AssertCommand("pkill -f \"\\-name %s\"" % instance.name, node=primary)
 -    time.sleep(5)
 +    time.sleep(10)
  
+   AssertCommand(["gnt-cluster", "modify", "--user-shutdown=true"])
    AssertCommand(["gnt-instance", "modify", "-H", "user_shutdown=true",
                   instance.name])
  
diff --cc src/Ganeti/Constants.hs
Simple merge
diff --cc src/Ganeti/Objects.hs
@@@ -747,54 -665,50 +747,55 @@@ type CandidateCertificates = Container 
  
  -- * Cluster definitions
  $(buildObject "Cluster" "cluster" $
 -  [ simpleField "rsahostkeypub"             [t| String           |]
 +  [ simpleField "rsahostkeypub"                  [t| String                 |]
    , optionalField $
 -    simpleField "dsahostkeypub"             [t| String           |]
 -  , simpleField "highest_used_port"         [t| Int              |]
 -  , simpleField "tcpudp_port_pool"          [t| [Int]            |]
 -  , simpleField "mac_prefix"                [t| String           |]
 +    simpleField "dsahostkeypub"                  [t| String                 |]
 +  , simpleField "highest_used_port"              [t| Int                    |]
 +  , simpleField "tcpudp_port_pool"               [t| [Int]                  |]
 +  , simpleField "mac_prefix"                     [t| String                 |]
    , optionalField $
 -    simpleField "volume_group_name"         [t| String           |]
 -  , simpleField "reserved_lvs"              [t| [String]         |]
 +    simpleField "volume_group_name"              [t| String                 |]
 +  , simpleField "reserved_lvs"                   [t| [String]               |]
    , optionalField $
 -    simpleField "drbd_usermode_helper"      [t| String           |]
 -  , simpleField "master_node"               [t| String           |]
 -  , simpleField "master_ip"                 [t| String           |]
 -  , simpleField "master_netdev"             [t| String           |]
 -  , simpleField "master_netmask"            [t| Int              |]
 -  , simpleField "use_external_mip_script"   [t| Bool             |]
 -  , simpleField "cluster_name"              [t| String           |]
 -  , simpleField "file_storage_dir"          [t| String           |]
 -  , simpleField "shared_file_storage_dir"   [t| String           |]
 -  , simpleField "gluster_storage_dir"       [t| String           |]
 -  , simpleField "enabled_hypervisors"       [t| [Hypervisor]     |]
 -  , simpleField "hvparams"                  [t| ClusterHvParams  |]
 -  , simpleField "os_hvp"                    [t| OsHvParams       |]
 -  , simpleField "beparams"                  [t| ClusterBeParams  |]
 -  , simpleField "osparams"                  [t| ClusterOsParams  |]
 -  , simpleField "nicparams"                 [t| ClusterNicParams |]
 -  , simpleField "ndparams"                  [t| FilledNDParams   |]
 -  , simpleField "diskparams"                [t| DiskParams       |]
 -  , simpleField "candidate_pool_size"       [t| Int              |]
 -  , simpleField "modify_etc_hosts"          [t| Bool             |]
 -  , simpleField "modify_ssh_setup"          [t| Bool             |]
 -  , simpleField "maintain_node_health"      [t| Bool             |]
 -  , simpleField "uid_pool"                  [t| UidPool          |]
 -  , simpleField "default_iallocator"        [t| String           |]
 -  , simpleField "default_iallocator_params" [t| IAllocatorParams |]
 -  , simpleField "hidden_os"                 [t| [String]         |]
 -  , simpleField "blacklisted_os"            [t| [String]         |]
 -  , simpleField "primary_ip_family"         [t| IpFamily         |]
 -  , simpleField "prealloc_wipe_disks"       [t| Bool             |]
 -  , simpleField "ipolicy"                   [t| FilledIPolicy    |]
 -  , simpleField "enabled_disk_templates"    [t| [DiskTemplate]   |]
 -  , simpleField "candidate_certs"           [t| CandidateCertificates |]
 -  , simpleField "max_running_jobs"          [t| Int              |]
 -  , simpleField "enabled_user_shutdown"     [t| Bool             |]
 +    simpleField "drbd_usermode_helper"           [t| String                 |]
 +  , simpleField "master_node"                    [t| String                 |]
 +  , simpleField "master_ip"                      [t| String                 |]
 +  , simpleField "master_netdev"                  [t| String                 |]
 +  , simpleField "master_netmask"                 [t| Int                    |]
 +  , simpleField "use_external_mip_script"        [t| Bool                   |]
 +  , simpleField "cluster_name"                   [t| String                 |]
 +  , simpleField "file_storage_dir"               [t| String                 |]
 +  , simpleField "shared_file_storage_dir"        [t| String                 |]
 +  , simpleField "gluster_storage_dir"            [t| String                 |]
 +  , simpleField "enabled_hypervisors"            [t| [Hypervisor]           |]
 +  , simpleField "hvparams"                       [t| ClusterHvParams        |]
 +  , simpleField "os_hvp"                         [t| OsHvParams             |]
 +  , simpleField "beparams"                       [t| ClusterBeParams        |]
 +  , simpleField "osparams"                       [t| ClusterOsParams        |]
 +  , simpleField "osparams_private_cluster"       [t| ClusterOsParamsPrivate |]
 +  , simpleField "nicparams"                      [t| ClusterNicParams       |]
 +  , simpleField "ndparams"                       [t| FilledNDParams         |]
 +  , simpleField "diskparams"                     [t| GroupDiskParams        |]
 +  , simpleField "candidate_pool_size"            [t| Int                    |]
 +  , simpleField "modify_etc_hosts"               [t| Bool                   |]
 +  , simpleField "modify_ssh_setup"               [t| Bool                   |]
 +  , simpleField "maintain_node_health"           [t| Bool                   |]
 +  , simpleField "uid_pool"                       [t| UidPool                |]
 +  , simpleField "default_iallocator"             [t| String                 |]
 +  , simpleField "default_iallocator_params"      [t| IAllocatorParams       |]
 +  , simpleField "hidden_os"                      [t| [String]               |]
 +  , simpleField "blacklisted_os"                 [t| [String]               |]
 +  , simpleField "primary_ip_family"              [t| IpFamily               |]
 +  , simpleField "prealloc_wipe_disks"            [t| Bool                   |]
 +  , simpleField "ipolicy"                        [t| FilledIPolicy          |]
 +  , simpleField "enabled_disk_templates"         [t| [DiskTemplate]         |]
 +  , simpleField "candidate_certs"                [t| CandidateCertificates  |]
 +  , simpleField "max_running_jobs"               [t| Int                    |]
 +  , simpleField "install_image"                  [t| String                 |]
 +  , simpleField "instance_communication_network" [t| String                 |]
 +  , simpleField "zeroing_image"                  [t| String                 |]
 +  , simpleField "compression_tools"              [t| [String]               |]
++  , simpleField "enabled_user_shutdown"          [t| Bool                   |]
   ]
   ++ timeStampFields
   ++ uuidFields
diff --cc src/Ganeti/OpCodes.hs
@@@ -239,10 -236,7 +239,11 @@@ $(genOpCode "OpCode
       , pClusterFileStorageDir
       , pClusterSharedFileStorageDir
       , pClusterGlusterStorageDir
 +     , pInstallImage
 +     , pInstanceCommunicationNetwork
 +     , pZeroingImage
 +     , pCompressionTools
+      , pEnabledUserShutdown
       ],
       [])
    , ("OpClusterRedistConf",
diff --cc src/Ganeti/OpParams.hs
Simple merge
diff --cc src/Ganeti/Query/Server.hs
@@@ -173,12 -165,8 +173,14 @@@ handleCall _ _ cdata QueryClusterInfo 
              , ("hidden_os", showJSON $ clusterHiddenOs cluster)
              , ("blacklisted_os", showJSON $ clusterBlacklistedOs cluster)
              , ("enabled_disk_templates", showJSON diskTemplates)
 +            , ("install_image", showJSON $ clusterInstallImage cluster)
 +            , ("instance_communication_network",
 +               showJSON (clusterInstanceCommunicationNetwork cluster))
 +            , ("zeroing_image", showJSON $ clusterZeroingImage cluster)
 +            , ("compression_tools",
 +               showJSON $ clusterCompressionTools cluster)
+             , ("enabled_user_shutdown",
+                showJSON $ clusterEnabledUserShutdown cluster)
              ]
  
    in case master of
diff --cc src/Ganeti/Ssconf.hs
@@@ -30,17 -30,21 +30,24 @@@ module Ganeti.Sscon
    , sSKeyToRaw
    , sSKeyFromRaw
    , getPrimaryIPFamily
+   , parseNodesVmCapable
+   , getNodesVmCapable
    , getMasterCandidatesIps
    , getMasterNode
+   , parseHypervisorList
+   , getHypervisorList
+   , parseEnabledUserShutdown
+   , getEnabledUserShutdown
    , keyToFilename
    , sSFilePrefix
 +  , SSConf(..)
 +  , emptySSConf
    ) where
  
+ import Control.Applicative ((<$>))
  import Control.Exception
- import Control.Monad (liftM)
+ import Control.Monad (forM, liftM)
 +import qualified Data.Map as M
  import Data.Maybe (fromMaybe)
  import qualified Network.Socket as Socket
  import System.FilePath ((</>))
@@@ -50,13 -53,12 +57,15 @@@ import qualified Text.JSON as 
  import qualified AutoConf
  import Ganeti.BasicTypes
  import qualified Ganeti.Constants as C
 +import Ganeti.JSON (GenericContainer(..), HasStringRepr(..))
  import qualified Ganeti.Path as Path
  import Ganeti.THH
+ import Ganeti.Types (Hypervisor)
+ import qualified Ganeti.Types as Types
  import Ganeti.Utils
  
 +-- * Reading individual ssconf entries
 +
  -- | Maximum ssconf file size we support.
  maxFileSize :: Int
  maxFileSize = 131072
@@@ -90,13 -92,9 +100,14 @@@ $(declareSADT "SSKey
    , ("SSMaintainNodeHealth",   'C.ssMaintainNodeHealth)
    , ("SSUidPool",              'C.ssUidPool)
    , ("SSNodegroups",           'C.ssNodegroups)
 +  , ("SSNetworks",             'C.ssNetworks)
+   , ("SSEnabledUserShutdown",  'C.ssEnabledUserShutdown)
    ])
  
 +instance HasStringRepr SSKey where
 +  fromStringRepr = sSKeyFromRaw
 +  toStringRepr = sSKeyToRaw
 +
  -- | Convert a ssconf key into a (full) file path.
  keyToFilename :: FilePath     -- ^ Config path root
                -> SSKey        -- ^ Ssconf key
@@@ -156,17 -175,26 +188,39 @@@ getMasterCandidatesIps optPath = d
  getMasterNode :: Maybe FilePath -> IO (Result String)
  getMasterNode optPath = do
    result <- readSSConfFile optPath Nothing SSMasterNode
 -  return $ liftM rStripSpace result
 +  return (liftM rStripSpace result)
  
+ -- | Parse the list of enabled hypervisors from a 'String'.
+ parseHypervisorList :: String -> Result [Hypervisor]
+ parseHypervisorList str =
+   mapM Types.hypervisorFromRaw $ lines str
+
+ -- | Read and parse the list of enabled hypervisors.
+ getHypervisorList :: Maybe FilePath -> IO (Result [Hypervisor])
+ getHypervisorList optPath =
+   (parseHypervisorList =<<) <$>
+     readSSConfFile optPath Nothing SSHypervisorList
+
+ -- | Parse whether user shutdown is enabled from a 'String'.
+ parseEnabledUserShutdown :: String -> Result Bool
+ parseEnabledUserShutdown str =
+   tryRead "Parsing enabled_user_shutdown" (rStripSpace str)
+
+ -- | Read and parse whether user shutdown is enabled.
+ getEnabledUserShutdown :: Maybe FilePath -> IO (Result Bool)
+ getEnabledUserShutdown optPath =
+   (parseEnabledUserShutdown =<<) <$>
+     readSSConfFile optPath Nothing SSEnabledUserShutdown
++
 +-- * Working with the whole ssconf map
 +
 +-- | The data type used for representing the ssconf.
 +newtype SSConf = SSConf { getSSConf :: M.Map SSKey [String] }
 +  deriving (Eq, Ord, Show)
 +
 +instance J.JSON SSConf where
 +  showJSON = J.showJSON . GenericContainer . getSSConf
 +  readJSON = liftM (SSConf . fromContainer) . J.readJSON
 +
 +emptySSConf :: SSConf
 +emptySSConf = SSConf M.empty
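The new ssconf entries are plain text files with one value (or one key=value pair) per line, which is what lets the KVM daemon decide locally whether it should run. A rough Python counterpart of the parsers added above, for illustration only; the directory, file-name scheme and helper names here are assumptions of this sketch, not Ganeti code::

    from pathlib import Path

    SSCONF_DIR = Path("/var/lib/ganeti")  # assumed ssconf location

    def read_ssconf(key):
        # e.g. "enabled_user_shutdown" -> <SSCONF_DIR>/ssconf_enabled_user_shutdown
        return (SSCONF_DIR / ("ssconf_%s" % key)).read_text()

    def parse_enabled_user_shutdown(text):
        # mirrors parseEnabledUserShutdown: the file holds "True" or "False"
        return text.strip() == "True"

    def parse_nodes_vm_capable(text):
        # mirrors parseNodesVmCapable: one "node.example.com=True|False" per line
        return dict((name, flag == "True")
                    for name, _, flag in (line.partition("=")
                                          for line in text.splitlines() if line))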
diff --cc src/Ganeti/Types.hs
Simple merge
diff --cc src/Ganeti/Utils.hs
Simple merge
diff --cc test/data/cluster_config_2.11.json
@@@ -57,6 -55,6 +57,7 @@@
          "enabled_hypervisors": [
              "xen-pvm"
          ],
++        "enabled_user_shutdown": false,
          "file_storage_dir": "",
          "hidden_os": [],
          "highest_used_port": 32105,
diff --cc test/hs/Test/Ganeti/OpCodes.hs
@@@ -171,47 -172,18 +171,48 @@@ instance Arbitrary OpCodes.OpCode wher
        "OP_CLUSTER_RENAME" ->
          OpCodes.OpClusterRename <$> genNameNE
        "OP_CLUSTER_SET_PARAMS" ->
 -        OpCodes.OpClusterSetParams <$> arbitrary <*> emptyMUD <*> emptyMUD <*>
 -          arbitrary <*> genMaybe arbitrary <*>
 -          genMaybe genEmptyContainer <*> emptyMUD <*>
 -          genMaybe genEmptyContainer <*> genMaybe genEmptyContainer <*>
 -          genMaybe genEmptyContainer <*> genMaybe arbitrary <*>
 -          genMaybe arbitrary <*>
 -          arbitrary <*> arbitrary <*> arbitrary <*>
 -          arbitrary <*> arbitrary <*> arbitrary <*>
 -          emptyMUD <*> emptyMUD <*> arbitrary <*>
 -          arbitrary  <*> emptyMUD <*> arbitrary <*> arbitrary <*> arbitrary <*>
 -          arbitrary <*> arbitrary <*> arbitrary <*> arbitrary <*> arbitrary <*>
 -          arbitrary <*> genMaybe genName <*> genMaybe genName <*> arbitrary
 +        OpCodes.OpClusterSetParams
 +          <$> arbitrary                    -- force
 +          <*> emptyMUD                     -- hv_state
 +          <*> emptyMUD                     -- disk_state
 +          <*> arbitrary                    -- vg_name
 +          <*> genMaybe arbitrary           -- enabled_hypervisors
 +          <*> genMaybe genEmptyContainer   -- hvparams
 +          <*> emptyMUD                     -- beparams
 +          <*> genMaybe genEmptyContainer   -- os_hvp
 +          <*> genMaybe genEmptyContainer   -- osparams
 +          <*> genMaybe genEmptyContainer   -- osparams_private_cluster
 +          <*> genMaybe genEmptyContainer   -- diskparams
 +          <*> genMaybe arbitrary           -- candidate_pool_size
 +          <*> genMaybe arbitrary           -- max_running_jobs
 +          <*> arbitrary                    -- uid_pool
 +          <*> arbitrary                    -- add_uids
 +          <*> arbitrary                    -- remove_uids
 +          <*> arbitrary                    -- maintain_node_health
 +          <*> arbitrary                    -- prealloc_wipe_disks
 +          <*> arbitrary                    -- nicparams
 +          <*> emptyMUD                     -- ndparams
 +          <*> emptyMUD                     -- ipolicy
 +          <*> arbitrary                    -- drbd_helper
 +          <*> arbitrary                    -- default_iallocator
 +          <*> emptyMUD                     -- default_iallocator_params
 +          <*> genMaybe genMacPrefix        -- mac_prefix
 +          <*> arbitrary                    -- master_netdev
 +          <*> arbitrary                    -- master_netmask
 +          <*> arbitrary                    -- reserved_lvs
 +          <*> arbitrary                    -- hidden_os
 +          <*> arbitrary                    -- blacklisted_os
 +          <*> arbitrary                    -- use_external_mip_script
 +          <*> arbitrary                    -- enabled_disk_templates
 +          <*> arbitrary                    -- modify_etc_hosts
 +          <*> genMaybe genName             -- file_storage_dir
 +          <*> genMaybe genName             -- shared_file_storage_dir
 +          <*> genMaybe genName             -- gluster_file_storage_dir
 +          <*> arbitrary                    -- install_image
 +          <*> arbitrary                    -- instance_communication_network
 +          <*> arbitrary                    -- zeroing_image
 +          <*> arbitrary                    -- compression_tools
++          <*> arbitrary                    -- enabled_user_shutdown
        "OP_CLUSTER_REDIST_CONF" -> pure OpCodes.OpClusterRedistConf
        "OP_CLUSTER_ACTIVATE_MASTER_IP" ->
          pure OpCodes.OpClusterActivateMasterIp
diff --cc test/hs/Test/Ganeti/Ssconf.hs
@@@ -29,14 -29,14 +29,16 @@@ Foundation, Inc., 51 Franklin Street, F
  module Test.Ganeti.Ssconf (testSsconf) where
  
  import Test.QuickCheck
+ import qualified Test.HUnit as HUnit
  
  import Data.List
 +import qualified Data.Map as M
  
  import Test.Ganeti.TestHelper
 +import Test.Ganeti.TestCommon
  
  import qualified Ganeti.Ssconf as Ssconf
+ import qualified Ganeti.Types as Types
  
  -- * Ssconf tests
  
@@@ -52,13 -47,34 +54,41 @@@ prop_filename key 
    printTestCase "Key doesn't start with correct prefix" $
      Ssconf.sSFilePrefix `isPrefixOf` Ssconf.keyToFilename "" key
  
+ caseParseNodesVmCapable :: HUnit.Assertion
+ caseParseNodesVmCapable = do
+   let str = "node1.example.com=True\nnode2.example.com=False"
+       result = Ssconf.parseNodesVmCapable str
+       expected = return
+         [ ("node1.example.com", True)
+         , ("node2.example.com", False)
+         ]
+   HUnit.assertEqual "Mismatch in parsed and expected result" expected result
+ caseParseHypervisorList :: HUnit.Assertion
+ caseParseHypervisorList = do
+   let result = Ssconf.parseHypervisorList "kvm\nxen-pvm\nxen-hvm"
+       expected = return [Types.Kvm, Types.XenPvm, Types.XenHvm]
+   HUnit.assertEqual "Mismatch in parsed and expected result" expected result
+ caseParseEnabledUserShutdown :: HUnit.Assertion
+ caseParseEnabledUserShutdown = do
+   let result1 = Ssconf.parseEnabledUserShutdown "True"
+       result2 = Ssconf.parseEnabledUserShutdown "False"
+   HUnit.assertEqual "Mismatch in parsed and expected result"
+     (return True) result1
+   HUnit.assertEqual "Mismatch in parsed and expected result"
+     (return False) result2
 +-- * Creating and writing SSConf
 +
 +-- | Verify that for SSConf we have readJSON . showJSON = Ok.
 +prop_ReadShow :: Ssconf.SSConf -> Property
 +prop_ReadShow = testSerialisation
 +
  testSuite "Ssconf"
    [ 'prop_filename
+   , 'caseParseNodesVmCapable
+   , 'caseParseHypervisorList
+   , 'caseParseEnabledUserShutdown
 +  , 'prop_ReadShow
    ]
diff --cc test/py/cfgupgrade_unittest.py
@@@ -48,13 -47,9 +48,14 @@@ def GetMinimalConfig()
        "default_iallocator_params": {},
        "ndparams": {},
        "candidate_certs": {},
 +      "install_image": "",
 +      "instance_communication_network": "",
 +      "zeroing_image": "",
 +      "compression_tools": constants.IEC_DEFAULT_TOOLS,
+       "enabled_user_shutdown": False,
      },
      "instances": {},
 +    "disks": {},
      "networks": {},
      "nodegroups": {},
      "nodes": {
diff --cc test/py/cmdlib/backup_unittest.py
Simple merge
diff --cc test/py/cmdlib/instance_unittest.py
Simple merge
diff --cc test/py/daemon-util_unittest.bash
@@@ -36,8 -36,8 +36,8 @@@ if ! grep -q '^ENABLE_MOND = ' lib/_con
    err "Please update $0, mond enable feature is missing"
  fi
  
- DAEMONS_LIST="noded wconfd rapi luxid"
- STOPDAEMONS_LIST="luxid rapi wconfd noded"
 -DAEMONS_LIST="noded masterd rapi luxid kvmd"
 -STOPDAEMONS_LIST="kvmd luxid rapi masterd noded"
++DAEMONS_LIST="noded wconfd rapi luxid kvmd"
++STOPDAEMONS_LIST="kvmd luxid rapi wconfd noded"
  
  if grep -q '^ENABLE_CONFD = True' lib/_constants.py; then
    DAEMONS_LIST="$DAEMONS_LIST confd"
diff --cc test/py/ganeti.query_unittest.py
Simple merge
diff --cc tools/cfgupgrade
@@@ -150,16 -150,9 +150,18 @@@ def UpgradeCluster(config_data)
    ial_params = cluster.get("default_iallocator_params", None)
    if not ial_params:
      cluster["default_iallocator_params"] = {}
 -  cluster["candidate_certs"] = cluster.get("candidate_certs", {})
 +  if not "candidate_certs" in cluster:
 +    cluster["candidate_certs"] = {}
 +  cluster["instance_communication_network"] = \
 +    cluster.get("instance_communication_network", "")
 +  cluster["install_image"] = \
 +    cluster.get("install_image", "")
 +  cluster["zeroing_image"] = \
 +    cluster.get("zeroing_image", "")
 +  cluster["compression_tools"] = \
 +    cluster.get("compression_tools", constants.IEC_DEFAULT_TOOLS)
+   if "enabled_user_shutdown" not in cluster:
+     cluster["enabled_user_shutdown"] = False
  
  
  def UpgradeGroups(config_data):
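The defaulting in the cfgupgrade hunk above is what the 2.11.2 release notes refer to: a configuration written by 2.11.0 or 2.11.1 lacks the new 'enabled_user_shutdown' key and only gains it when 'cfgupgrade' runs. A minimal standalone illustration of that idempotent pattern (not Ganeti code)::

    def upgrade_cluster(cluster):
        # Add the new field with a safe default; leave it alone if present.
        if "enabled_user_shutdown" not in cluster:
            cluster["enabled_user_shutdown"] = False
        return cluster

    old_config = {"cluster_name": "example-cluster"}   # hypothetical pre-2.11.2 config
    print(upgrade_cluster(old_config))
    # {'cluster_name': 'example-cluster', 'enabled_user_shutdown': False}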