diff --git a/README.rst b/README.rst
index 3da333c8be661eb06a070f20f3f713d2a0068e12..7947e31073db0a8793a7e92efd5ef68047a7ed2a 100644
--- a/README.rst
+++ b/README.rst
@@ -32,6 +32,8 @@ Features
 * Discovery, introspection and provisioning of bare metal compute hosts
   using `OpenStack ironic <https://docs.openstack.org/developer/ironic/>`_ and
   `ironic inspector <https://docs.openstack.org/developer/ironic-inspector/>`_
+* Virtualised compute using `OpenStack nova
+  <https://docs.openstack.org/developer/nova/>`_
 * Containerised workloads on bare metal using `OpenStack magnum
   <https://docs.openstack.org/developer/magnum/>`_
 * Big data on bare metal using `OpenStack sahara
@@ -41,5 +43,3 @@ In the near future we aim to add support for the following:
 
 * Control plane and workload monitoring and log aggregation using `OpenStack
   monasca <https://wiki.openstack.org/wiki/Monasca>`_
-* Virtualised compute using `OpenStack nova
-  <https://docs.openstack.org/developer/nova/>`_
diff --git a/ansible/compute-node-discovery.yml b/ansible/compute-node-discovery.yml
index 6dded81c14a939405d69814401d2ae3e28ba0399..93cf7548f8256aa0abca33dfb17b1e54c13aecb2 100644
--- a/ansible/compute-node-discovery.yml
+++ b/ansible/compute-node-discovery.yml
@@ -1,8 +1,8 @@
 ---
 - include: dell-compute-node-inventory.yml
 
-- name: Ensure compute nodes are PXE booted
-  hosts: compute
+- name: Ensure baremetal compute nodes are PXE booted
+  hosts: baremetal-compute
   gather_facts: no
   vars:
     controller_host: "{{ groups['controllers'][0] }}"
@@ -19,7 +19,7 @@
         # be respected when using delegate_to.
         ansible_host: "{{ hostvars[controller_host].ansible_host | default(controller_host) }}"
 
-    - name: Ensure compute nodes are powered off
+    - name: Ensure baremetal compute nodes are powered off
       command: ipmitool -U {{ ipmi_username }} -P {{ ipmi_password }} -H {{ ipmi_address }} -I lanplus chassis power off
       delegate_to: "{{ controller_host }}"
       vars:
@@ -31,7 +31,7 @@
       pause:
         seconds: 5
 
-    - name: Ensure compute nodes are set to boot via PXE
+    - name: Ensure baremetal compute nodes are set to boot via PXE
       command: ipmitool -U {{ ipmi_username }} -P {{ ipmi_password }} -H {{ ipmi_address }} -I lanplus chassis bootdev pxe
       delegate_to: "{{ controller_host }}"
       vars:
@@ -43,7 +43,7 @@
       pause:
         seconds: 5
 
-    - name: Ensure compute nodes are powered on
+    - name: Ensure baremetal compute nodes are powered on
       command: ipmitool -U {{ ipmi_username }} -P {{ ipmi_password }} -H {{ ipmi_address }} -I lanplus chassis power on
       delegate_to: "{{ controller_host }}"
       vars:
diff --git a/ansible/compute-node-provide.yml b/ansible/compute-node-provide.yml
index e06959900d165186aca87875a02524e1a991e96f..2e935501529ce28c1fa246c457c298ba62fca4e8 100644
--- a/ansible/compute-node-provide.yml
+++ b/ansible/compute-node-provide.yml
@@ -1,14 +1,14 @@
 ---
-# This playbook will ensure that all compute nodes in the overcloud ironic
-# inventory are available. Supported initial states include 'enroll' and
+# This playbook will ensure that all baremetal compute nodes in the overcloud
+# ironic inventory are available. Supported initial states include 'enroll' and
 # 'manageable'.
 
-- name: Ensure compute nodes are available in ironic
+- name: Ensure baremetal compute nodes are available in ironic
   hosts: controllers[0]
   vars:
     venv: "{{ virtualenv_path }}/shade"
-    # Set this to a colon-separated list of compute node hostnames to provide.
-    # If unset, all compute nodes will be provided.
+    # Set this to a colon-separated list of baremetal compute node hostnames to
+    # provide.  If unset, all baremetal compute nodes will be provided.
     compute_node_limit: ""
     compute_node_limit_list: "{{ compute_node_limit.split(':') }}"
   roles:
@@ -74,7 +74,7 @@
     - name: Fail if any ironic nodes are not available
       fail:
         msg: >
-          Failed to make compute node {{ item['Name'] }} available in ironic.
+          Failed to make baremetal compute node {{ item['Name'] }} available in ironic.
           Provisioning state is {{ item['Provisioning State'] }}.
       with_items: "{{ ironic_nodes }}"
       when: item['Provisioning State'] != 'available'
diff --git a/ansible/dell-compute-node-boot-mode.yml b/ansible/dell-compute-node-boot-mode.yml
index fe4bdbd8797126d077fe588afde77a224c479c45..bf78317f137bb221d1ffa233ea655f44c0f32d81 100644
--- a/ansible/dell-compute-node-boot-mode.yml
+++ b/ansible/dell-compute-node-boot-mode.yml
@@ -1,11 +1,11 @@
 ---
-# Set the boot mode (BIOS, UEFI) of Dell compute nodes.
+# Set the boot mode (BIOS, UEFI) of Dell baremetal compute nodes.
 
-# Add compute nodes to the Ansible inventory.
+# Add Dell baremetal compute nodes to the Ansible inventory.
 - include: dell-compute-node-boot-mode.yml
 
-- name: Ensure compute nodes boot mode is set
-  hosts: compute
+- name: Ensure Dell baremetal compute nodes boot mode is set
+  hosts: baremetal-compute
   gather_facts: no
   vars:
     # Set this to the required boot mode. One of 'bios' or 'uefi'.
diff --git a/ansible/dell-compute-node-discovery.yml b/ansible/dell-compute-node-discovery.yml
index 597e641fea0d2c17a92ad9359001fca7dc91885e..9cf26cc4bad21a29893cf19bd4533f70a7655c49 100644
--- a/ansible/dell-compute-node-discovery.yml
+++ b/ansible/dell-compute-node-discovery.yml
@@ -1,11 +1,11 @@
 ---
-# Configure the compute nodes to PXE boot.
+# Configure the Dell baremetal compute nodes to PXE boot.
 
-# Add compute nodes to the Ansible inventory.
+# Add Dell baremetal compute nodes to the Ansible inventory.
 - include: dell-compute-node-inventory.yml
 
-- name: Ensure compute nodes are PXE booted
-  hosts: compute
+- name: Ensure Dell baremetal compute nodes are PXE booted
+  hosts: baremetal-compute
   gather_facts: no
   vars:
     # Set this to the index of the inteface on which to enable PXE.
diff --git a/ansible/dell-compute-node-inventory.yml b/ansible/dell-compute-node-inventory.yml
index 90ed61b9719ffc59cde9cf16fcfc3cd907edce57..ac3279a6bf9ffe3bfd431d537ef66596354b934d 100644
--- a/ansible/dell-compute-node-inventory.yml
+++ b/ansible/dell-compute-node-inventory.yml
@@ -1,17 +1,18 @@
 ---
-- name: Ensure compute nodes are present in the Ansible inventory
+- name: Ensure Dell baremetal compute nodes are present in the Ansible inventory
   hosts: config-mgmt
   gather_facts: no
   vars:
-    # Set this to a colon-separated list of compute node hostnames on which to
-    # trigger discovery. If unset, all compute nodes will be triggered.
+    # Set this to a colon-separated list of baremetal compute node hostnames
+    # on which to trigger discovery. If unset, all baremetal compute nodes
+    # will be triggered.
     compute_node_limit: ""
     compute_node_limit_list: "{{ compute_node_limit.split(':') }}"
   tasks:
-    - name: Add hosts for the compute nodes
+    - name: Add hosts for the Dell baremetal compute nodes
       add_host:
         name: "{{ item.key }}"
-        groups: compute
+        groups: baremetal-compute
         # SSH configuration to access the BMC.
         ansible_host: "{{ item.value }}"
         ansible_user: "{{ ipmi_username }}"
@@ -24,8 +25,8 @@
         - not compute_node_limit or item.key | replace('-idrac', '') in compute_node_limit_list
       run_once: True
 
-- name: Ensure compute nodes are present in the Ansible inventory
-  hosts: compute
+- name: Ensure Dell baremetal compute nodes are present in the Ansible inventory
+  hosts: baremetal-compute
   gather_facts: no
   vars:
     compute_node_limit: ""
@@ -33,7 +34,9 @@
   tasks:
     - name: Set facts for the compute nodes for IPMI addresses
       set_fact:
+        bmc_type: idrac
         ipmi_address: "{{ idrac_network_ips[inventory_hostname] }}"
-      # Don't add hosts that already exist.
-      when: not compute_node_limit or inventory_hostname in compute_node_limit_list
+      when:
+        - not ipmi_address
+        - not compute_node_limit or inventory_hostname in compute_node_limit_list
       run_once: True
diff --git a/ansible/group_vars/all/compute b/ansible/group_vars/all/compute
new file mode 100644
index 0000000000000000000000000000000000000000..0c3878052833c66ec0d7340a17000605f8148f52
--- /dev/null
+++ b/ansible/group_vars/all/compute
@@ -0,0 +1,115 @@
+---
+###############################################################################
+# Compute node configuration.
+
+# User with which to access the computes via SSH during bootstrap, in order
+# to setup the Kayobe user account.
+compute_bootstrap_user: "{{ lookup('env', 'USER') }}"
+
+###############################################################################
+# Compute network interface configuration.
+
+# List of networks to which compute nodes are attached.
+compute_network_interfaces: >
+  {{ (compute_default_network_interfaces +
+      compute_extra_network_interfaces) | unique | list }}
+
+# List of default networks to which compute nodes are attached.
+compute_default_network_interfaces: >
+  {{ [provision_oc_net_name,
+      internal_net_name,
+      storage_net_name] | unique | list }}
+
+# List of extra networks to which compute nodes are attached.
+compute_extra_network_interfaces: []
+
+###############################################################################
+# Compute node BIOS configuration.
+
+# Dict of compute BIOS options. Format is same as that used by stackhpc.drac
+# role.
+compute_bios_config: "{{ compute_bios_config_default | combine(compute_bios_config_extra) }}"
+
+# Dict of default compute BIOS options. Format is same as that used by
+# stackhpc.drac role.
+compute_bios_config_default: {}
+
+# Dict of additional compute BIOS options. Format is same as that used by
+# stackhpc.drac role.
+compute_bios_config_extra: {}
+
+###############################################################################
+# Compute node RAID configuration.
+
+# List of compute RAID volumes. Format is same as that used by stackhpc.drac
+# role.
+compute_raid_config: "{{ compute_raid_config_default + compute_raid_config_extra }}"
+
+# List of default compute RAID volumes. Format is same as that used by
+# stackhpc.drac role.
+compute_raid_config_default: []
+
+# List of additional compute RAID volumes. Format is same as that used by
+# stackhpc.drac role.
+compute_raid_config_extra: []
+
+###############################################################################
+# Compute node LVM configuration.
+
+# List of compute volume groups. See mrlesmithjr.manage-lvm role for
+# format.
+compute_lvm_groups: "{{ compute_lvm_groups_default + compute_lvm_groups_extra }}"
+
+# Default list of compute volume groups. See mrlesmithjr.manage-lvm role for
+# format.
+compute_lvm_groups_default:
+  - "{{ compute_lvm_group_data }}"
+
+# Additional list of compute volume groups. See mrlesmithjr.manage-lvm role
+# for format.
+compute_lvm_groups_extra: []
+
+# Compute LVM volume group for data. See mrlesmithjr.manage-lvm role for
+# format.
+compute_lvm_group_data:
+  vgname: data
+  disks: "{{ compute_lvm_group_data_disks | join(',') }}"
+  create: True
+  lvnames: "{{ compute_lvm_group_data_lvs }}"
+
+# List of disks for use by compute LVM data volume group. Default to an
+# invalid value to require configuration.
+compute_lvm_group_data_disks:
+  - changeme
+
+# List of LVM logical volumes for the data volume group.
+compute_lvm_group_data_lvs:
+  - "{{ compute_lvm_group_data_lv_docker_volumes }}"
+
+# Docker volumes LVM backing volume.
+compute_lvm_group_data_lv_docker_volumes:
+  lvname: docker-volumes
+  size: "{{ compute_lvm_group_data_lv_docker_volumes_size }}"
+  create: True
+  filesystem: "{{ compute_lvm_group_data_lv_docker_volumes_fs }}"
+  mount: True
+  mntp: /var/lib/docker/volumes
+
+# Size of docker volumes LVM backing volume.
+compute_lvm_group_data_lv_docker_volumes_size: 75%VG
+
+# Filesystem for docker volumes LVM backing volume. ext4 allows for shrinking.
+compute_lvm_group_data_lv_docker_volumes_fs: ext4
+
+###############################################################################
+# Compute node sysctl configuration.
+
+# Dict of sysctl parameters to set.
+compute_sysctl_parameters: {}
+
+###############################################################################
+# Compute node user configuration.
+
+# List of users to create. This should be in a format accepted by the
+# singleplatform-eng.users role.
+compute_users: "{{ users_default }}"
diff --git a/ansible/group_vars/all/inspector b/ansible/group_vars/all/inspector
index 463f22ed22cbeee6eeceb18dfe183640d9cd82fe..422f5971a7fffbfe95cca24b62084e35f1894483 100644
--- a/ansible/group_vars/all/inspector
+++ b/ansible/group_vars/all/inspector
@@ -323,7 +323,7 @@ inspector_dell_switch_lldp_workaround_group:
 # data which may be useful in environments without Swift.
 
 # Whether the inspection data store is enabled.
-inspector_store_enabled: "{{ not kolla_enable_swift | bool }}"
+inspector_store_enabled: "{{ kolla_enable_ironic | bool and not kolla_enable_swift | bool }}"
 
 # Port on which the inspection data store should listen.
 inspector_store_port: 8080
diff --git a/ansible/group_vars/all/kolla b/ansible/group_vars/all/kolla
index be8a9926429534e48a9123348293f1f10e8e3d47..431323b792bc81ea90c0b24d4616870e4fce25cc 100644
--- a/ansible/group_vars/all/kolla
+++ b/ansible/group_vars/all/kolla
@@ -227,6 +227,9 @@ kolla_overcloud_inventory_top_level_group_map:
   network:
     groups:
       - network
+  compute:
+    groups:
+      - compute
 
 # List of names of top level kolla-ansible groups. Any of these groups which
 # have no hosts mapped to them will be provided with an empty group definition.
diff --git a/ansible/group_vars/compute/ansible-user b/ansible/group_vars/compute/ansible-user
new file mode 100644
index 0000000000000000000000000000000000000000..5f74de0c13346230d1707e90d79af4c4738e1831
--- /dev/null
+++ b/ansible/group_vars/compute/ansible-user
@@ -0,0 +1,7 @@
+---
+# User with which to access the computes via SSH.
+ansible_user: "{{ kayobe_ansible_user }}"
+
+# User with which to access the computes before the kayobe_ansible_user
+# account has been created.
+bootstrap_user: "{{ compute_bootstrap_user }}"
diff --git a/ansible/group_vars/compute/bios b/ansible/group_vars/compute/bios
new file mode 100644
index 0000000000000000000000000000000000000000..b53bc641cafd169dc7ebf0b39532d534fef2609a
--- /dev/null
+++ b/ansible/group_vars/compute/bios
@@ -0,0 +1,7 @@
+---
+###############################################################################
+# Compute node BIOS configuration.
+
+# Dict of compute node BIOS options. Format is same as that used by
+# stackhpc.drac role.
+bios_config: "{{ compute_bios_config }}"
diff --git a/ansible/group_vars/compute/lvm b/ansible/group_vars/compute/lvm
new file mode 100644
index 0000000000000000000000000000000000000000..5c6889ec5f8dec63e991d5b577d11be8e693e2e8
--- /dev/null
+++ b/ansible/group_vars/compute/lvm
@@ -0,0 +1,6 @@
+---
+###############################################################################
+# Compute node LVM configuration.
+
+# List of LVM volume groups.
+lvm_groups: "{{ compute_lvm_groups }}"
diff --git a/ansible/group_vars/compute/network b/ansible/group_vars/compute/network
new file mode 100644
index 0000000000000000000000000000000000000000..94810f07a9217d128d47ddff2925c6c8a8847775
--- /dev/null
+++ b/ansible/group_vars/compute/network
@@ -0,0 +1,6 @@
+---
+###############################################################################
+# Network interface attachments.
+
+# List of networks to which these nodes are attached.
+network_interfaces: "{{ compute_network_interfaces | unique | list }}"
diff --git a/ansible/group_vars/compute/raid b/ansible/group_vars/compute/raid
new file mode 100644
index 0000000000000000000000000000000000000000..598d0b4bf391f471eef4ebf53f5f3f2676dfc23d
--- /dev/null
+++ b/ansible/group_vars/compute/raid
@@ -0,0 +1,7 @@
+---
+###############################################################################
+# Compute node RAID configuration.
+
+# List of compute node RAID volumes. Format is same as that used by
+# stackhpc.drac role.
+raid_config: "{{ compute_raid_config }}"
diff --git a/ansible/group_vars/compute/sysctl b/ansible/group_vars/compute/sysctl
new file mode 100644
index 0000000000000000000000000000000000000000..16cf547d95e77eab4f9bc57892dd9d172a88bf03
--- /dev/null
+++ b/ansible/group_vars/compute/sysctl
@@ -0,0 +1,3 @@
+---
+# Dict of sysctl parameters to set.
+sysctl_parameters: "{{ compute_sysctl_parameters }}"
diff --git a/ansible/group_vars/compute/users b/ansible/group_vars/compute/users
new file mode 100644
index 0000000000000000000000000000000000000000..5e0c81588bd370c70d058c0fa344d457dbbe319d
--- /dev/null
+++ b/ansible/group_vars/compute/users
@@ -0,0 +1,4 @@
+---
+# List of users to create. This should be in a format accepted by the
+# singleplatform-eng.users role.
+users: "{{ compute_users }}"
diff --git a/ansible/kolla-openstack.yml b/ansible/kolla-openstack.yml
index 3aaf3f938c57bc13979510bff3633bad89397ce5..1250f818fa5c61c8e1a8e2b5db8cf36edf8d2981 100644
--- a/ansible/kolla-openstack.yml
+++ b/ansible/kolla-openstack.yml
@@ -1,6 +1,14 @@
 ---
+
+- name: Check whether Ironic is enabled
+  hosts: controllers
+  tasks:
+    - name: Create controllers group with ironic enabled
+      group_by:
+        key: "controllers_with_ironic_enabled_{{ kolla_enable_ironic }}"
+
 - name: Ensure locally built Ironic Python Agent images are copied
-  hosts: controllers[0]
+  hosts: controllers_with_ironic_enabled_True
   vars:
     # These are the filenames generated by overcloud-ipa-build.yml.
     ipa_image_name: "ipa"
@@ -139,17 +147,19 @@
               }}
           with_items: "{{ kolla_neutron_ml2_generic_switch_hosts }}"
 
-        - name: Set facts containing IPA kernel and ramdisk URLs
-          set_fact:
-            kolla_inspector_ipa_kernel_upstream_url: "{{ inspector_ipa_kernel_upstream_url }}"
-            kolla_inspector_ipa_ramdisk_upstream_url: "{{ inspector_ipa_ramdisk_upstream_url }}"
-          when: not ipa_build_images | bool
+        - block:
+            - name: Set facts containing IPA kernel and ramdisk URLs
+              set_fact:
+                kolla_inspector_ipa_kernel_upstream_url: "{{ inspector_ipa_kernel_upstream_url }}"
+                kolla_inspector_ipa_ramdisk_upstream_url: "{{ inspector_ipa_ramdisk_upstream_url }}"
+              when: not ipa_build_images | bool
 
-        - name: Set facts containing IPA kernel and ramdisk paths
-          set_fact:
-            kolla_inspector_ipa_kernel_path: "{{ image_cache_path }}/{{ ipa_image_name }}/{{ ipa_images_kernel_name }}"
-            kolla_inspector_ipa_ramdisk_path: "{{ image_cache_path }}/{{ ipa_image_name }}/{{ ipa_images_ramdisk_name }}"
-          when: ipa_build_images | bool
+            - name: Set facts containing IPA kernel and ramdisk paths
+              set_fact:
+                kolla_inspector_ipa_kernel_path: "{{ image_cache_path }}/{{ ipa_image_name }}/{{ ipa_images_kernel_name }}"
+                kolla_inspector_ipa_ramdisk_path: "{{ image_cache_path }}/{{ ipa_image_name }}/{{ ipa_images_ramdisk_name }}"
+              when: ipa_build_images | bool
+          when: kolla_enable_ironic | bool
       tags:
         - config
   roles:
diff --git a/ansible/overcloud-introspection-rules-dell-lldp-workaround.yml b/ansible/overcloud-introspection-rules-dell-lldp-workaround.yml
index 3d369433cb8a58a805e010db79bfc5ae892ad52c..ac44d5010bcfd2cee725bdf2b793b53ac0c21099 100644
--- a/ansible/overcloud-introspection-rules-dell-lldp-workaround.yml
+++ b/ansible/overcloud-introspection-rules-dell-lldp-workaround.yml
@@ -8,8 +8,16 @@
 # each ironic node that matches against the switch system and the relevant
 # interface name, then sets the node's name appropriately.
 
-- name: Group controller hosts in systems requiring the workaround
+- name: Check whether Ironic is enabled
   hosts: controllers
+  tasks:
+    - name: Create controllers group with ironic enabled
+      group_by:
+        key: "controllers_with_ironic_enabled_{{ kolla_enable_ironic }}"
+
+- name: Group controller hosts in systems requiring the workaround
+  hosts: controllers_with_ironic_enabled_True
+  gather_facts: False
   tasks:
     - name: Group controller hosts in systems requiring the Dell switch LLDP workaround
       group_by:
@@ -18,6 +26,7 @@
 - name: Ensure introspection rules for Dell switch LLDP workarounds are registered in Ironic Inspector
   # Only required to run on a single host.
   hosts: controllers_require_workaround_True[0]
+  gather_facts: False
   vars:
     all_switch_interfaces: []
     ironic_inspector_rules: []
diff --git a/ansible/overcloud-introspection-rules.yml b/ansible/overcloud-introspection-rules.yml
index bbb88e8b70395e8620f19ac90fd3b832debf91a0..502373e150ec978628092fd993b1d22b7ba63c69 100644
--- a/ansible/overcloud-introspection-rules.yml
+++ b/ansible/overcloud-introspection-rules.yml
@@ -1,7 +1,15 @@
 ---
+- name: Check whether Ironic is enabled
+  hosts: controllers
+  tasks:
+    - name: Create controllers group with ironic enabled
+      group_by:
+        key: "controllers_with_ironic_enabled_{{ kolla_enable_ironic }}"
+
 - name: Ensure introspection rules are registered in Ironic Inspector
   # Only required to run on a single host.
-  hosts: controllers[0]
+  hosts: controllers_with_ironic_enabled_True[0]
+  gather_facts: False
   vars:
     venv: "{{ virtualenv_path }}/shade"
   pre_tasks:
diff --git a/ansible/overcloud-ipa-build.yml b/ansible/overcloud-ipa-build.yml
index ab38300a1c165c09660620b0c46c86b26d7d8740..587a8b46e0a50bbb963beccff38acc17f0bd65bf 100644
--- a/ansible/overcloud-ipa-build.yml
+++ b/ansible/overcloud-ipa-build.yml
@@ -4,8 +4,16 @@
 #
 # The images will be stored in {{ image_cache_path }}/{{ ipa_image_name }}.
 
+- name: Check whether Ironic is enabled
+  hosts: controllers
+  tasks:
+    - name: Create controllers group with ironic enabled
+      group_by:
+        key: "controllers_with_ironic_enabled_{{ kolla_enable_ironic }}"
+
 - name: Ensure Ironic Python Agent images are built and installed
-  hosts: controllers[0]
+  hosts: controllers_with_ironic_enabled_True[0]
+  gather_facts: False
   vars:
     ipa_image_name: "ipa"
   tasks:
diff --git a/ansible/overcloud-ipa-images.yml b/ansible/overcloud-ipa-images.yml
index 7d59fb15cd3c96880cacd222a6724ec3b85e1112..488ea66c7a554195ace95eef1724920d7615123c 100644
--- a/ansible/overcloud-ipa-images.yml
+++ b/ansible/overcloud-ipa-images.yml
@@ -1,6 +1,14 @@
 ---
+- name: Check whether Ironic is enabled
+  hosts: controllers
+  tasks:
+    - name: Create controllers group with ironic enabled
+      group_by:
+        key: "controllers_with_ironic_enabled_{{ kolla_enable_ironic }}"
+
 - name: Ensure Ironic Python Agent (IPA) images are downloaded and registered
-  hosts: controllers[0]
+  hosts: controllers_with_ironic_enabled_True[0]
+  gather_facts: False
   vars:
     # These are the filenames generated by overcloud-ipa-build.yml.
     ipa_image_name: "ipa"
diff --git a/ansible/provision-net.yml b/ansible/provision-net.yml
index aceb1417119e057baf5251a45d4399a95b56ec7c..7fe844217b54bcad57d6e2791ec4a6b28c82ba95 100644
--- a/ansible/provision-net.yml
+++ b/ansible/provision-net.yml
@@ -1,7 +1,15 @@
 ---
+- name: Check whether Ironic is enabled
+  hosts: controllers
+  tasks:
+    - name: Create controllers group with ironic enabled
+      group_by:
+        key: "controllers_with_ironic_enabled_{{ kolla_enable_ironic }}"
+
 - name: Ensure provisioning network and subnet are registered in neutron
   # Only required to run on a single host.
-  hosts: controllers[0]
+  hosts: controllers_with_ironic_enabled_True[0]
+  gather_facts: False
   pre_tasks:
     - name: Validate OpenStack password authentication parameters
       fail:
diff --git a/ansible/roles/kolla-ansible/templates/overcloud-top-level.j2 b/ansible/roles/kolla-ansible/templates/overcloud-top-level.j2
index 0d213eb3eea2e74307b517164ae7afb454458033..d540371c596fd13df8504bb4355ad35a1b5db35f 100644
--- a/ansible/roles/kolla-ansible/templates/overcloud-top-level.j2
+++ b/ansible/roles/kolla-ansible/templates/overcloud-top-level.j2
@@ -15,7 +15,7 @@
 # Top level {{ group }} group.
 [{{ group }}]
 # These hostnames must be resolvable from your deployment host
-{% for host in groups[group] %}
+{% for host in groups.get(group, []) %}
 {% set host_hv=hostvars[host] %}
 {{ host }}{% for hv_name in kolla_overcloud_inventory_pass_through_host_vars %}{% if hv_name in host_hv %} {{ hv_name }}={{ host_hv[hv_name] }}{% endif %}{% endfor %}
 
diff --git a/doc/source/architecture.rst b/doc/source/architecture.rst
index 09fff0e968613337b9a2c47ed5c9db0f64e789a9..87559ee009366d6de360602b7fc32bcbad01b219 100644
--- a/doc/source/architecture.rst
+++ b/doc/source/architecture.rst
@@ -41,6 +41,9 @@ Network
 Monitoring
     Monitoring host run the control plane and workload monitoring services.
     Currently, kayobe does not deploy any services onto monitoring hosts.
+Virtualised compute hypervisors
+    Virtualised compute hypervisors run the tenant Virtual Machines (VMs) and
+    associated OpenStack services for compute, networking and storage.
 
 Networks
 ========
diff --git a/doc/source/configuration/index.rst b/doc/source/configuration/index.rst
new file mode 100644
index 0000000000000000000000000000000000000000..785b57ffc70cfb355f02336bf2d3b4448133b7cf
--- /dev/null
+++ b/doc/source/configuration/index.rst
@@ -0,0 +1,10 @@
+===================
+Configuration Guide
+===================
+
+.. toctree::
+   :maxdepth: 2
+
+   kayobe
+   network
+   kolla-ansible
diff --git a/doc/source/configuration/kayobe.rst b/doc/source/configuration/kayobe.rst
index 5466fa1b8eceda9516c3e0c7464cce35870f2ffb..148f576b5e5f8e21ade05e0661b08989826f49da 100644
--- a/doc/source/configuration/kayobe.rst
+++ b/doc/source/configuration/kayobe.rst
@@ -1,6 +1,6 @@
-=============
-Configuration
-=============
+====================
+Kayobe Configuration
+====================
 
 This section covers configuration of Kayobe.  As an Ansible-based project,
 Kayobe is for the most part configured using YAML files.
diff --git a/doc/source/configuration/network.rst b/doc/source/configuration/network.rst
index 8e9c851651a90059d15cfe4ef699b5d040fcfc9b..4cd8557d8b112030d3aa6989b82192fb9d0a250f 100644
--- a/doc/source/configuration/network.rst
+++ b/doc/source/configuration/network.rst
@@ -594,6 +594,20 @@ a list of names of additional networks to attach.  Alternatively, the list may
 be completely overridden by setting ``monitoring_network_interfaces``.  These
 variables are found in ``${KAYOBE_CONFIG_PATH}/monitoring.yml``.
 
+Virtualised Compute Hosts
+-------------------------
+
+By default, virtualised compute hosts are attached to the following networks:
+
+* overcloud provisioning network
+* internal network
+* storage network
+
+This list may be extended by setting ``compute_extra_network_interfaces`` to a
+list of names of additional networks to attach.  Alternatively, the list may be
+completely overridden by setting ``compute_network_interfaces``.  These
+variables are found in ``${KAYOBE_CONFIG_PATH}/compute.yml``.
+
 Other Hosts
 -----------
 
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 210101b141d8ccb6a1cdf89b57bbdc2a4e784151..aac175cebea1017164800e8a4a646a66cbcd3214 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -23,9 +23,7 @@ Documentation
    architecture
    installation
    usage
-   configuration/kayobe
-   configuration/network
-   configuration/kolla-ansible
+   configuration/index
    deployment
    upgrading
    administration
diff --git a/doc/source/release-notes.rst b/doc/source/release-notes.rst
index 19520e25c4f85a08726eb1df7139d75c94d28edc..e75b34faa93732362e20f9462d33f28ad1e66cff 100644
--- a/doc/source/release-notes.rst
+++ b/doc/source/release-notes.rst
@@ -24,6 +24,8 @@ Features
   which hosts run the nova compute service for ironic. This may be used to
   avoid the experimental HA nova compute service for ironic, by specifying a
   single host.
+* Adds support for deployment of virtualised compute hosts.  These hosts should
+  be added to the ``[compute]`` group.
 
 Upgrade Notes
 -------------
diff --git a/etc/kayobe/compute.yml b/etc/kayobe/compute.yml
new file mode 100644
index 0000000000000000000000000000000000000000..18fa9a8781b771b232e1217c6f4b21e3f2d501ed
--- /dev/null
+++ b/etc/kayobe/compute.yml
@@ -0,0 +1,101 @@
+---
+###############################################################################
+# Compute node configuration.
+
+# User with which to access the computes via SSH during bootstrap, in order
+# to setup the Kayobe user account.
+#compute_bootstrap_user:
+
+###############################################################################
+# Network interface attachments.
+
+# List of networks to which compute nodes are attached.
+#compute_network_interfaces:
+
+# List of default networks to which compute nodes are attached.
+#compute_default_network_interfaces:
+
+# List of extra networks to which compute nodes are attached.
+#compute_extra_network_interfaces:
+
+###############################################################################
+# Compute node BIOS configuration.
+
+# Dict of compute BIOS options. Format is same as that used by stackhpc.drac
+# role.
+#compute_bios_config:
+
+# Dict of default compute BIOS options. Format is same as that used by
+# stackhpc.drac role.
+#compute_bios_config_default:
+
+# Dict of additional compute BIOS options. Format is same as that used by
+# stackhpc.drac role.
+#compute_bios_config_extra:
+
+###############################################################################
+# Compute node RAID configuration.
+
+# List of compute RAID volumes. Format is same as that used by stackhpc.drac
+# role.
+#compute_raid_config:
+
+# List of default compute RAID volumes. Format is same as that used by
+# stackhpc.drac role.
+#compute_raid_config_default:
+
+# List of additional compute RAID volumes. Format is same as that used by
+# stackhpc.drac role.
+#compute_raid_config_extra:
+
+###############################################################################
+# Compute node LVM configuration.
+
+# List of compute volume groups. See mrlesmithjr.manage-lvm role for
+# format.
+#compute_lvm_groups:
+
+# Default list of compute volume groups. See mrlesmithjr.manage-lvm role for
+# format.
+#compute_lvm_groups_default:
+
+# Additional list of compute volume groups. See mrlesmithjr.manage-lvm role
+# for format.
+#compute_lvm_groups_extra:
+
+# Compute LVM volume group for data. See mrlesmithjr.manage-lvm role for
+# format.
+#compute_lvm_group_data:
+
+# List of disks for use by compute LVM data volume group. Default to an
+# invalid value to require configuration.
+#compute_lvm_group_data_disks:
+
+# List of LVM logical volumes for the data volume group.
+#compute_lvm_group_data_lvs:
+
+# Docker volumes LVM backing volume.
+#compute_lvm_group_data_lv_docker_volumes:
+
+# Size of docker volumes LVM backing volume.
+#compute_lvm_group_data_lv_docker_volumes_size:
+
+# Filesystem for docker volumes LVM backing volume. ext4 allows for shrinking.
+#compute_lvm_group_data_lv_docker_volumes_fs:
+
+###############################################################################
+# Compute node sysctl configuration.
+
+# Dict of sysctl parameters to set.
+#compute_sysctl_parameters:
+
+###############################################################################
+# Compute node user configuration.
+
+# List of users to create. This should be in a format accepted by the
+# singleplatform-eng.users role.
+#compute_users:
+
+###############################################################################
+# Dummy variable to allow Ansible to accept this file.
+workaround_ansible_issue_8743: yes
diff --git a/etc/kayobe/inventory/group_vars/compute/network-interfaces b/etc/kayobe/inventory/group_vars/compute/network-interfaces
new file mode 100644
index 0000000000000000000000000000000000000000..421f69d395f5866e735a68765e62bd367f53f061
--- /dev/null
+++ b/etc/kayobe/inventory/group_vars/compute/network-interfaces
@@ -0,0 +1,27 @@
+---
+###############################################################################
+# Network interface definitions for the compute group.
+
+# Overcloud provisioning network IP information.
+# provision_oc_net_interface:
+# provision_oc_net_bridge_ports:
+# provision_oc_net_bond_slaves:
+
+# Internal network IP information.
+# internal_net_interface:
+# internal_net_bridge_ports:
+# internal_net_bond_slaves:
+
+# External network IP information.
+# external_net_interface:
+# external_net_bridge_ports:
+# external_net_bond_slaves:
+
+# Storage network IP information.
+# storage_net_interface:
+# storage_net_bridge_ports:
+# storage_net_bond_slaves:
+
+###############################################################################
+# Dummy variable to allow Ansible to accept this file.
+workaround_ansible_issue_8743: yes
diff --git a/etc/kayobe/inventory/groups b/etc/kayobe/inventory/groups
index 02c5956d4b53c19f6b81c0fcea67611893ed56d9..2166e1b387d9fa3abd12a35a0da184f8dca2308f 100644
--- a/etc/kayobe/inventory/groups
+++ b/etc/kayobe/inventory/groups
@@ -15,10 +15,14 @@ controllers
 [monitoring]
 # Empty group to provide declaration of monitoring group.
 
+[compute]
+# Empty group to provide declaration of compute group.
+
 [overcloud:children]
 controllers
 network
 monitoring
+compute
 
 [docker:children]
 # Hosts in this group will have Docker installed.
@@ -26,6 +30,13 @@ seed
 controllers
 network
 monitoring
+compute
+
+###############################################################################
+# Baremetal compute node groups.
+
+[baremetal-compute]
+# Empty group to provide declaration of baremetal-compute group.
 
 ###############################################################################
 # Networking groups.
diff --git a/etc/kayobe/inventory/hosts.example b/etc/kayobe/inventory/hosts.example
index b3dc7a39dda87d800eee1bcc686dd70f0e45bb73..1cce1ce299497dfa9a46a71fbe64b9fddb9665e0 100644
--- a/etc/kayobe/inventory/hosts.example
+++ b/etc/kayobe/inventory/hosts.example
@@ -18,6 +18,9 @@ localhost ansible_connection=local
 # Add controller nodes here if required. These hosts will provide the
 # OpenStack overcloud.
 
+[baremetal-compute]
+# Add baremetal compute nodes here if required.
+
 [mgmt-switches]
 # Add management network switches here if required.