diff --git a/ansible/dev-tools.yml b/ansible/dev-tools.yml
index 5e7bd85b586194653618ade73f728d3830d9f832..d8e86792138d2d143ee15618bedcf323f86a867d 100644
--- a/ansible/dev-tools.yml
+++ b/ansible/dev-tools.yml
@@ -1,5 +1,5 @@
 ---
 - name: Ensure development tools are installed
-  hosts: seed:controllers
+  hosts: seed:overcloud
   roles:
     - role: dev-tools
diff --git a/ansible/disable-glean.yml b/ansible/disable-glean.yml
index b57dbc61fe736d7299936b6d9545468f6e1482a1..0ca5b40bc6ae1c906eb0c3f994a96234546ceb8b 100644
--- a/ansible/disable-glean.yml
+++ b/ansible/disable-glean.yml
@@ -3,6 +3,6 @@
 # servers but gets in the way after this as it tries to enable all network
 # interfaces. In some cases this can lead to timeouts.
 - name: Ensure Glean is disabled and its artifacts are removed
-  hosts: seed:controllers
+  hosts: seed:overcloud
   roles:
     - role: disable-glean
diff --git a/ansible/disable-selinux.yml b/ansible/disable-selinux.yml
index e5f3cad8d545a8f8c2c5585aa577054942c8669d..6e2c1a439d1f4ca602c7c2b83bcc4bbbceeb95ec 100644
--- a/ansible/disable-selinux.yml
+++ b/ansible/disable-selinux.yml
@@ -1,6 +1,6 @@
 ---
 - name: Disable SELinux and reboot if required
-  hosts: controllers:seed
+  hosts: seed:overcloud
   roles:
     - role: disable-selinux
       disable_selinux_reboot_timeout: "{{ 600 if ansible_virtualization_role == 'host' else 300 }}"
diff --git a/ansible/drac-bios.yml b/ansible/drac-bios.yml
index 2b767778c4f72d58fcbbdf4c71475ce03649deea..a832f2eaf06c7bb1bd280dde3d38910b1c79417b 100644
--- a/ansible/drac-bios.yml
+++ b/ansible/drac-bios.yml
@@ -1,6 +1,6 @@
 ---
-- name: Ensure that controller BIOS are configured
-  hosts: controllers
+- name: Ensure that overcloud nodes' BIOS settings are configured
+  hosts: overcloud
   gather_facts: no
   vars:
     bios_config:
diff --git a/ansible/drac-boot-order.yml b/ansible/drac-boot-order.yml
index 1977008a2926e764a379426267f743b3c4a6e958..52d12ba1a2cf557ffb1bdf9f8c6bd0be8380735e 100644
--- a/ansible/drac-boot-order.yml
+++ b/ansible/drac-boot-order.yml
@@ -1,6 +1,6 @@
 ---
-- name: Ensure that controller boot order is configured
-  hosts: controllers
+- name: Ensure that overcloud nodes' boot order is configured
+  hosts: overcloud
   gather_facts: no
   vars:
     ansible_host: "{{ ipmi_address }}"
diff --git a/ansible/drac-facts.yml b/ansible/drac-facts.yml
index e2bc0e22489a95760b42f854d8b09b7a13731010..235079513db656831c70d468ddef6e12d6eb5039 100644
--- a/ansible/drac-facts.yml
+++ b/ansible/drac-facts.yml
@@ -1,6 +1,6 @@
 ---
-- name: Ensure that controller BIOS are configured
-  hosts: controllers
+- name: Gather and display BIOS and RAID facts from iDRACs
+  hosts: overcloud
   gather_facts: no
   roles:
     # The role simply pulls in the drac_facts module.
diff --git a/ansible/group_vars/all/controllers b/ansible/group_vars/all/controllers
index 7fe0ade8f03b58612317873dbe222e1b12d35363..f09d5dcdb3fc6c2b59cabd7cb892d9b1eb0b8cb5 100644
--- a/ansible/group_vars/all/controllers
+++ b/ansible/group_vars/all/controllers
@@ -6,6 +6,21 @@
 # to setup the Kayobe user account.
 controller_bootstrap_user: "{{ lookup('env', 'USER') }}"
 
+###############################################################################
+# Controller network interface configuration.
+
+# List of default networks to which controller nodes are attached.
+controller_default_network_interfaces: >
+  {{ [provision_oc_net_name,
+      provision_wl_net_name,
+      internal_net_name,
+      external_net_name,
+      storage_net_name,
+      storage_mgmt_net_name] | unique | list }}
+
+# List of extra networks to which controller nodes are attached.
+controller_extra_network_interfaces: []
+
 ###############################################################################
 # Controller node BIOS configuration.
 
diff --git a/ansible/group_vars/all/monitoring b/ansible/group_vars/all/monitoring
new file mode 100644
index 0000000000000000000000000000000000000000..44725924995dc95a2d6b959288efc9c0d586c869
--- /dev/null
+++ b/ansible/group_vars/all/monitoring
@@ -0,0 +1,68 @@
+---
+###############################################################################
+# Monitoring node configuration.
+
+# User with which to access the monitoring nodes via SSH during bootstrap, in
+# order to set up the Kayobe user account.
+monitoring_bootstrap_user: "{{ controller_bootstrap_user }}"
+
+###############################################################################
+# Monitoring node network interface configuration.
+
+# List of default networks to which monitoring nodes are attached.
+monitoring_default_network_interfaces: >
+  {{ [provision_oc_net_name,
+      internal_net_name,
+      external_net_name] | unique | list }}
+
+# List of extra networks to which monitoring nodes are attached.
+monitoring_extra_network_interfaces: []
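+# For example, to attach monitoring nodes to an additional, hypothetical
+# 'mgmt' network defined in the networks configuration:
+# monitoring_extra_network_interfaces:
+#   - "{{ mgmt_net_name }}"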
+
+###############################################################################
+# Monitoring node BIOS configuration.
+
+# Dict of monitoring node BIOS options. Format is same as that used by
+# stackhpc.drac role.
+monitoring_bios_config: "{{ monitoring_bios_config_default | combine(monitoring_bios_config_extra) }}"
+
+# Dict of default monitoring node BIOS options. Format is same as that used by
+# stackhpc.drac role.
+monitoring_bios_config_default: "{{ controller_bios_config_default }}"
+
+# Dict of additional monitoring node BIOS options. Format is same as that used
+# by stackhpc.drac role.
+monitoring_bios_config_extra: "{{ controller_bios_config_extra }}"
+
+###############################################################################
+# Monitoring node RAID configuration.
+
+# List of monitoring node RAID volumes. Format is same as that used by
+# stackhpc.drac role.
+monitoring_raid_config: "{{ monitoring_raid_config_default + monitoring_raid_config_extra }}"
+
+# List of default monitoring node RAID volumes. Format is same as that used by
+# stackhpc.drac role.
+monitoring_raid_config_default: "{{ controller_raid_config_default }}"
+
+# List of additional monitoring node RAID volumes. Format is same as that used
+# by stackhpc.drac role.
+monitoring_raid_config_extra: "{{ controller_raid_config_extra }}"
+
+###############################################################################
+# Monitoring node LVM configuration.
+
+# List of monitoring node volume groups. See mrlesmithjr.manage-lvm role for
+# format.
+monitoring_lvm_groups: "{{ monitoring_lvm_groups_default + monitoring_lvm_groups_extra }}"
+
+# Default list of monitoring node volume groups. See mrlesmithjr.manage-lvm
+# role for format.
+monitoring_lvm_groups_default: "{{ controller_lvm_groups_default }}"
+
+# Additional list of monitoring node volume groups. See mrlesmithjr.manage-lvm
+# role for format.
+monitoring_lvm_groups_extra: "{{ controller_lvm_groups_extra }}"
diff --git a/ansible/group_vars/all/overcloud b/ansible/group_vars/all/overcloud
new file mode 100644
index 0000000000000000000000000000000000000000..7d4bbf192829d7aab49037731a9c1aec2a3b3597
--- /dev/null
+++ b/ansible/group_vars/all/overcloud
@@ -0,0 +1,26 @@
+---
+###############################################################################
+# Overcloud configuration.
+
+# Default Ansible group for overcloud hosts if not present in
+# overcloud_group_hosts_map.
+overcloud_group_default: controllers
+
+# List of names of Ansible groups for overcloud hosts.
+overcloud_groups:
+  - controllers
+  - monitoring
+
+# Dict mapping overcloud Ansible group names to lists of hosts in the group.
+# As a special case, the group 'ignore' can be used to specify hosts that
+# should not be added to the inventory.
+overcloud_group_hosts_map: {}
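+
+# For example, the following (with hypothetical hostnames) places mon0 in
+# the monitoring group and keeps spare0 out of the generated inventory:
+#
+# overcloud_group_hosts_map:
+#   monitoring:
+#     - mon0
+#   ignore:
+#     - spare0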
diff --git a/ansible/group_vars/controllers/ansible-host b/ansible/group_vars/controllers/ansible-host
deleted file mode 100644
index 9bf31705671716b7d583767c34ab09adbdf1d333..0000000000000000000000000000000000000000
--- a/ansible/group_vars/controllers/ansible-host
+++ /dev/null
@@ -1,3 +0,0 @@
----
-# Host/IP with which to access the controllers via SSH.
-ansible_host: "{{ provision_oc_net_name | net_ip }}"
diff --git a/ansible/group_vars/controllers/bios b/ansible/group_vars/controllers/bios
new file mode 100644
index 0000000000000000000000000000000000000000..5a2fa74dfe007af2f7e20257f53c3c6dffe2c003
--- /dev/null
+++ b/ansible/group_vars/controllers/bios
@@ -0,0 +1,7 @@
+---
+###############################################################################
+# Controller node BIOS configuration.
+
+# Dict of controller node BIOS options. Format is same as that used by
+# stackhpc.drac role.
+bios_config: "{{ controller_bios_config }}"
diff --git a/ansible/group_vars/controllers/network b/ansible/group_vars/controllers/network
index c0e90a46d7b359bb0d8a146b4a823ec30ebddc9c..3fd920c4f1153de9e29263f3d167a08cddd552cd 100644
--- a/ansible/group_vars/controllers/network
+++ b/ansible/group_vars/controllers/network
@@ -6,15 +6,3 @@
 network_interfaces: >
   {{ (controller_default_network_interfaces +
       controller_extra_network_interfaces) | unique | list }}
-
-# List of default networks to which controller nodes are attached.
-controller_default_network_interfaces: >
-  {{ [provision_oc_net_name,
-      provision_wl_net_name,
-      internal_net_name,
-      external_net_name,
-      storage_net_name,
-      storage_mgmt_net_name] | unique | list }}
-
-# List of extra networks to which controller nodes are attached.
-controller_extra_network_interfaces: []
diff --git a/ansible/group_vars/controllers/raid b/ansible/group_vars/controllers/raid
new file mode 100644
index 0000000000000000000000000000000000000000..77b47f31431a77e16a917d8cd8ae5985ebc9e269
--- /dev/null
+++ b/ansible/group_vars/controllers/raid
@@ -0,0 +1,7 @@
+---
+###############################################################################
+# Controller node RAID configuration.
+
+# List of controller node RAID volumes. Format is same as that used by
+# stackhpc.drac role.
+raid_config: "{{ controller_raid_config }}"
diff --git a/ansible/group_vars/monitoring/ansible-user b/ansible/group_vars/monitoring/ansible-user
new file mode 100644
index 0000000000000000000000000000000000000000..28c2d9feadd21ee7da8afeaaa86d33eb806145f6
--- /dev/null
+++ b/ansible/group_vars/monitoring/ansible-user
@@ -0,0 +1,7 @@
+---
+# User with which to access the monitoring nodes via SSH.
+ansible_user: "{{ kayobe_ansible_user }}"
+
+# User with which to access the monitoring nodes before the kayobe_ansible_user
+# account has been created.
+bootstrap_user: "{{ monitoring_bootstrap_user }}"
diff --git a/ansible/group_vars/monitoring/bios b/ansible/group_vars/monitoring/bios
new file mode 100644
index 0000000000000000000000000000000000000000..af64ec9c7d84d89558d699b7f9f8c3cd72182ac3
--- /dev/null
+++ b/ansible/group_vars/monitoring/bios
@@ -0,0 +1,12 @@
+---
+###############################################################################
+# Monitoring node BIOS configuration.
+
+# Dict of monitoring node BIOS options. Format is same as that used by
+# stackhpc.drac role.
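+# If a host is in both the controllers and monitoring groups, the controller
+# BIOS configuration takes precedence.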
+bios_config: >
+  {{ controller_bios_config
+     if inventory_hostname in groups['controllers'] else
+     monitoring_bios_config }}
diff --git a/ansible/group_vars/monitoring/lvm b/ansible/group_vars/monitoring/lvm
new file mode 100644
index 0000000000000000000000000000000000000000..4198c2ac810bdefc1b6caa1c0156fa07eac9a8c6
--- /dev/null
+++ b/ansible/group_vars/monitoring/lvm
@@ -0,0 +1,9 @@
+---
+###############################################################################
+# Monitoring node LVM configuration.
+
+# List of LVM volume groups.
+lvm_groups: >
+  {{ controller_lvm_groups
+     if inventory_hostname in groups['controllers'] else
+     monitoring_lvm_groups }}
diff --git a/ansible/group_vars/monitoring/network b/ansible/group_vars/monitoring/network
new file mode 100644
index 0000000000000000000000000000000000000000..6fcde222af8b7d695e4955dcfbc8e22ab728d61c
--- /dev/null
+++ b/ansible/group_vars/monitoring/network
@@ -0,0 +1,11 @@
+---
+###############################################################################
+# Network interface attachments.
+
+# List of networks to which these nodes are attached.
+network_interfaces: >
+  {{ (controller_default_network_interfaces +
+      controller_extra_network_interfaces) | unique | list
+     if inventory_hostname in groups['controllers'] else
+     (monitoring_default_network_interfaces +
+      monitoring_extra_network_interfaces) | unique | list }}
diff --git a/ansible/group_vars/monitoring/raid b/ansible/group_vars/monitoring/raid
new file mode 100644
index 0000000000000000000000000000000000000000..8e8ad890bee6d7880d79bacd563692ffbd66d645
--- /dev/null
+++ b/ansible/group_vars/monitoring/raid
@@ -0,0 +1,10 @@
+---
+###############################################################################
+# Monitoring node RAID configuration.
+
+# List of monitoring node RAID volumes. Format is same as that used by
+# stackhpc.drac role.
+raid_config: >
+  {{ controller_raid_config
+     if inventory_hostname in groups['controllers'] else
+     monitoring_raid_config }}
diff --git a/ansible/group_vars/overcloud/ansible-host b/ansible/group_vars/overcloud/ansible-host
new file mode 100644
index 0000000000000000000000000000000000000000..df6d5357598518ccaf4f08f57d6441150b0a83c2
--- /dev/null
+++ b/ansible/group_vars/overcloud/ansible-host
@@ -0,0 +1,3 @@
+---
+# Host/IP with which to access the overcloud nodes via SSH.
+ansible_host: "{{ provision_oc_net_name | net_ip }}"
diff --git a/ansible/ip-allocation.yml b/ansible/ip-allocation.yml
index c3f5823858f93f1b8530a8f08fbf205269b5d007..96c7932bcb889498398d7bd1f4eb5bcc84040b56 100644
--- a/ansible/ip-allocation.yml
+++ b/ansible/ip-allocation.yml
@@ -1,6 +1,6 @@
 ---
 - name: Ensure IP addresses are allocated
-  hosts: seed:controllers
+  hosts: seed:overcloud
   gather_facts: no
   pre_tasks:
     - name: Initialise the IP allocations fact
diff --git a/ansible/ip-routing.yml b/ansible/ip-routing.yml
index ce16e1928002ea8b609784fd5bed3a22f9482dc2..cbb1fac025dbd8563ab02e7f3b055290c8616f4a 100644
--- a/ansible/ip-routing.yml
+++ b/ansible/ip-routing.yml
@@ -2,6 +2,6 @@
 # Enable IP routing in the kernel.
 
 - name: Ensure IP routing is enabled
-  hosts: seed:controllers
+  hosts: seed:overcloud
   roles:
     - role: ip-routing
diff --git a/ansible/kayobe-ansible-user.yml b/ansible/kayobe-ansible-user.yml
index 1c4a015179ccf8c46853a1e2028975b16371755a..50612c3c06ced46c8bbeaa1da19c4efe0481ab60 100644
--- a/ansible/kayobe-ansible-user.yml
+++ b/ansible/kayobe-ansible-user.yml
@@ -1,6 +1,6 @@
 ---
 - name: Ensure the Kayobe Ansible user account exists
-  hosts: seed:controllers
+  hosts: seed:overcloud
   vars:
     ansible_user: "{{ bootstrap_user }}"
   tasks:
diff --git a/ansible/kolla-bifrost-hostvars.yml b/ansible/kolla-bifrost-hostvars.yml
index 6f78acad46d19ca06d6e27d720bb1f01d90f6e5b..cf4e22718d6ce5b988fccc9e8a11cbc65bd659d6 100644
--- a/ansible/kolla-bifrost-hostvars.yml
+++ b/ansible/kolla-bifrost-hostvars.yml
@@ -1,7 +1,7 @@
 ---
 # Update the Bifrost inventory with the IP allocation and other variables.
 
-- name: Ensure the Bifrost controller inventory is initialised
+- name: Ensure the Bifrost overcloud inventory is initialised
   hosts: seed
   gather_facts: no
   tasks:
@@ -20,8 +20,8 @@
         force: True
       become: True
 
-- name: Ensure the Bifrost controller inventory is populated
-  hosts: controllers
+- name: Ensure the Bifrost overcloud inventory is populated
+  hosts: overcloud
   gather_facts: no
   vars:
     seed_host: "{{ groups['seed'][0] }}"
diff --git a/ansible/lvm.yml b/ansible/lvm.yml
index 7bd3f716a37468b0587a590f802cb645f5b88daf..fa09fe07bdbe4d18caf59a9a285bc85d86c9e9a5 100644
--- a/ansible/lvm.yml
+++ b/ansible/lvm.yml
@@ -1,6 +1,6 @@
 ---
 - name: Ensure LVM configuration is applied
-  hosts: seed:controllers
+  hosts: seed:overcloud
   pre_tasks:
     - name: Fail if the LVM physical disks have not been configured
       fail:
diff --git a/ansible/network.yml b/ansible/network.yml
index 41a01566dc3c9b4d2f03f54217865d2e70916e76..bb776433f55a128f53eff4789045e711673c8003 100644
--- a/ansible/network.yml
+++ b/ansible/network.yml
@@ -1,6 +1,6 @@
 ---
 - name: Ensure networking is configured
-  hosts: seed:controllers
+  hosts: seed:overcloud
   tags:
     - config
   vars:
diff --git a/ansible/ntp.yml b/ansible/ntp.yml
index c8f923a9274063a08727c74a7c88339afbf05857..e3b8d7ccec2a17f958988a8c6e1ea58bd8f520b8 100644
--- a/ansible/ntp.yml
+++ b/ansible/ntp.yml
@@ -1,6 +1,6 @@
 ---
 - name: Ensure NTP is installed and configured
-  hosts: seed:controllers
+  hosts: seed:overcloud
   roles:
     - role: yatesr.timezone
       become: True
diff --git a/ansible/overcloud-bios-raid.yml b/ansible/overcloud-bios-raid.yml
index 31b9d468eeb12a768596f52985601fd29d1736d3..4053a74715c50a909c945b26ea268cb99f836f63 100644
--- a/ansible/overcloud-bios-raid.yml
+++ b/ansible/overcloud-bios-raid.yml
@@ -6,30 +6,30 @@
 # set the ironic nodes' to maintenance mode to prevent ironic from managing
 # their power states.
 
-- name: Group controller hosts by their BMC type
-  hosts: controllers
+- name: Group overcloud nodes by their BMC type
+  hosts: overcloud
   gather_facts: no
   vars:
     # List of BMC types supporting BIOS and RAID configuration.
     supported_bmc_types:
       - idrac
   tasks:
-    - name: Fail if controller has BIOS and/or RAID configuration and BMC type is not supported
+    - name: Fail if node has BIOS and/or RAID configuration and BMC type is not supported
       fail:
         msg: >
-          Controller has BIOS and/or RAID configuration but BMC type
+          Node has BIOS and/or RAID configuration but BMC type
           {% if bmc_type is undefined %}is not defined{% else %}{{ bmc_type }}
           is not supported{% endif %}.
       when:
-        - "{{ controller_bios_config or controller_raid_config }}"
+        - "{{ bios_config or raid_config }}"
         - "{{ bmc_type is undefined or bmc_type not in supported_bmc_types }}"
 
-    - name: Group controller hosts by their BMC type
+    - name: Group overcloud hosts by their BMC type
       group_by:
-        key: "controllers_with_bmcs_of_type_{{ bmc_type | default('unknown') }}"
+        key: "overcloud_with_bmcs_of_type_{{ bmc_type | default('unknown') }}"
 
-- name: Check whether any changes to controller BIOS and RAID configuration are required
-  hosts: controllers_with_bmcs_of_type_idrac
+- name: Check whether any changes to nodes' BIOS and RAID configuration are required
+  hosts: overcloud_with_bmcs_of_type_idrac
   gather_facts: no
   vars:
     # Set this to False to avoid rebooting the nodes after configuration.
@@ -39,22 +39,22 @@
       drac_address: "{{ ipmi_address }}"
       drac_username: "{{ ipmi_username }}"
       drac_password: "{{ ipmi_password }}"
-      drac_bios_config: "{{ controller_bios_config }}"
-      drac_raid_config: "{{ controller_raid_config }}"
+      drac_bios_config: "{{ bios_config }}"
+      drac_raid_config: "{{ raid_config }}"
       drac_check_mode: True
   tasks:
     - name: Set a fact about whether the configuration changed
       set_fact:
         bios_or_raid_change: "{{ drac_result | changed }}"
 
-- name: Ensure that controller BIOS and RAID volumes are configured
-  hosts: controllers_with_bmcs_of_type_idrac
+- name: Ensure that overcloud BIOS and RAID volumes are configured
+  hosts: overcloud_with_bmcs_of_type_idrac
   gather_facts: no
   vars:
     # Set this to False to avoid rebooting the nodes after configuration.
     drac_reboot: True
   pre_tasks:
-    - name: Set the controller nodes' maintenance mode
+    - name: Set the overcloud nodes' maintenance mode
       command: >
         docker exec bifrost_deploy
         bash -c '. env-vars &&
@@ -82,12 +82,12 @@
       drac_address: "{{ ipmi_address }}"
       drac_username: "{{ ipmi_username }}"
       drac_password: "{{ ipmi_password }}"
-      drac_bios_config: "{{ controller_bios_config }}"
-      drac_raid_config: "{{ controller_raid_config }}"
+      drac_bios_config: "{{ bios_config }}"
+      drac_raid_config: "{{ raid_config }}"
       when: "{{ bios_or_raid_change | bool }}"
 
   tasks:
-    - name: Unset the controller nodes' maintenance mode
+    - name: Unset the overcloud nodes' maintenance mode
       command: >
         docker exec bifrost_deploy
         bash -c '. env-vars &&
diff --git a/ansible/overcloud-deprovision.yml b/ansible/overcloud-deprovision.yml
index 7a28aef61ccbd7ceb8d2e8bb306b0459355063d4..249c1f4a1887b837ba173edcc50db183c77f95a1 100644
--- a/ansible/overcloud-deprovision.yml
+++ b/ansible/overcloud-deprovision.yml
@@ -1,10 +1,10 @@
 ---
 # Use bifrost to deprovision the overcloud nodes.
 
-- name: Ensure the overcloud controllers are deprovisioned
-  hosts: controllers
+- name: Ensure the overcloud nodes are deprovisioned
+  hosts: overcloud
   vars:
-    # Set to False to avoid waiting for the controllers to become active.
+    # Set to False to avoid waiting for the nodes to become available.
     wait_available: True
     wait_available_timeout: 600
     wait_available_interval: 10
diff --git a/ansible/overcloud-hardware-inspect.yml b/ansible/overcloud-hardware-inspect.yml
index d4b93eef90678f36d9f4f04a81cda33391a6f752..273b92e856774aba8599e3e0c2d87c3b9de0e776 100644
--- a/ansible/overcloud-hardware-inspect.yml
+++ b/ansible/overcloud-hardware-inspect.yml
@@ -1,10 +1,10 @@
 ---
 # Use bifrost to inspect the overcloud nodes' hardware.
 
-- name: Ensure the overcloud controller hardware is inspected
-  hosts: controllers
+- name: Ensure the overcloud nodes' hardware is inspected
+  hosts: overcloud
   vars:
-    # Set to False to avoid waiting for the controllers to become active.
+    # Set to False to avoid waiting for the nodes to be inspected.
     wait_inspected: True
     wait_inspected_timeout: 600
     wait_inspected_interval: 10
@@ -133,7 +133,7 @@
       when:
         - "{{ wait_inspected | bool }}"
 
-    - name: Fail if any of the controllers are not manageable
+    - name: Fail if any of the nodes are not manageable
       fail:
         msg: >
           Ironic node for {{ inventory_hostname }} is in an unexpected
diff --git a/ansible/overcloud-inventory-discover.yml b/ansible/overcloud-inventory-discover.yml
index 98bc52a586b714ac18104373cbe7bfd33d5f4a5f..3189971e316141575de1eb8f5a31d19e2eb17a23 100644
--- a/ansible/overcloud-inventory-discover.yml
+++ b/ansible/overcloud-inventory-discover.yml
@@ -2,7 +2,7 @@
 # Gather an inventory of nodes from the seed's Ironic service. Use this to
 # generate an Ansible inventory for Kayobe.
 
-- name: Ensure the controller Ansible inventory is populated
+- name: Ensure the overcloud Ansible inventory is populated
   hosts: seed
   tasks:
     - name: Gather the Ironic node inventory using Bifrost
@@ -18,20 +18,40 @@
       set_fact:
         ironic_inventory: "{{ inventory_result.stdout | from_json }}"
 
-    - name: Ensure Kayobe controller inventory exists
+    - name: Ensure Kayobe overcloud inventory exists
       local_action:
         module: copy
         content: |
           # Managed by Ansible - do not edit.
-          # This is the Kayobe controller inventory, autogenerated from the seed
+          # This is the Kayobe overcloud inventory, autogenerated from the seed
           # node's Ironic inventory.
 
-          [controllers]
+          {# Build a list of all hosts with explicit mappings. #}
+          {% set all_mapped_hosts = [] %}
+          {% for hosts in overcloud_group_hosts_map.values() %}
+          {% set _ = all_mapped_hosts.extend(hosts) %}
+          {% endfor %}
+          {% set ignore_hosts = overcloud_group_hosts_map.get("ignore", []) %}
+
+          {# Add a section for each group. #}
+          {% for group in overcloud_groups %}
+          [{{ group }}]
+          {% set group_hosts = overcloud_group_hosts_map.get(group, []) %}
           {% for host in ironic_inventory.baremetal.hosts %}
+          {% if (host in group_hosts or
+                 (group == overcloud_group_default and host not in all_mapped_hosts))
+                and host not in ignore_hosts %}
           {% set hostvars=ironic_inventory._meta.hostvars[host] %}
           {% set ipmi_address=hostvars.driver_info.ipmi_address | default %}
           {% set system_vendor=hostvars.extra.system_vendor | default %}
           {% set bmc_type=system_vendor | bmc_type_from_system_vendor %}
           {{ host }} ipmi_address={{ ipmi_address }} bmc_type={{ bmc_type }}
+          {% endif %}
+          {% endfor %}
+
           {% endfor %}
-        dest: "{{ kayobe_config_path }}/inventory/controllers"
+        dest: "{{ kayobe_config_path }}/inventory/overcloud"
diff --git a/ansible/overcloud-provision.yml b/ansible/overcloud-provision.yml
index 851ff73794005ff7eed6d62d78850fa7f6ad3a75..1c0124645b04158101a361d2455f018424c28402 100644
--- a/ansible/overcloud-provision.yml
+++ b/ansible/overcloud-provision.yml
@@ -1,14 +1,14 @@
 ---
 # Use bifrost to provision the overcloud nodes with a base OS.
 
-- name: Ensure the overcloud controllers are provisioned
-  hosts: controllers
+- name: Ensure the overcloud nodes are provisioned
+  hosts: overcloud
   vars:
-    # Set to False to avoid waiting for the controllers to become active.
+    # Set to False to avoid waiting for the nodes to become active.
     wait_active: True
     wait_active_timeout: 600
     wait_active_interval: 10
-    # Set to False to avoid waiting for the controllers to be accessible via
+    # Set to False to avoid waiting for the nodes to be accessible via
     # SSH.
     wait_ssh: True
     wait_ssh_timeout: 600
@@ -133,7 +133,7 @@
       with_items:
         - "{{ hostvars[groups['seed'][0]].ansible_host }}"
       # We execute this only once, allowing the Bifrost Ansible to handle
-      # multiple controllers.
+      # multiple nodes.
       run_once: True
 
     - name: Wait for the ironic node to become active
@@ -171,7 +171,7 @@
         - "{{ wait_active | bool }}"
         - "{{ initial_provision_state != 'active' }}"
 
-    - name: Fail if any of the controllers are not available
+    - name: Fail if any of the nodes are not available
       fail:
         msg: >
           Ironic node for {{ inventory_hostname }} is in an unexpected
@@ -182,7 +182,7 @@
         - "{{ initial_provision_state != 'active' }}"
         - "{{ final_provision_state != 'active' }}"
 
-    - name: Wait for SSH access to the controllers
+    - name: Wait for SSH access to the nodes
       local_action:
         module: wait_for
         host: "{{ ansible_host }}"
diff --git a/ansible/roles/kolla-ansible/templates/overcloud.j2 b/ansible/roles/kolla-ansible/templates/overcloud.j2
index 670a4a229c57dd549f014befb4ed2adfc94d7787..b71ef71acea6787faa4cd444c1f432a32e12e8eb 100644
--- a/ansible/roles/kolla-ansible/templates/overcloud.j2
+++ b/ansible/roles/kolla-ansible/templates/overcloud.j2
@@ -27,8 +27,22 @@ controllers
 
 [compute:children]
 
-[monitoring:children]
-controllers
+[monitoring]
+# These hostnames must be resolvable from your deployment host
+{% for monitoring_host in groups['monitoring'] %}
+{% set monitoring_hv=hostvars[monitoring_host] %}
+{{ monitoring_host }}{% if "ansible_host" in monitoring_hv %}    ansible_host={{ monitoring_hv["ansible_host"] }}{% endif %}
+
+{% endfor %}
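+{# With a hypothetical monitoring host mon0 whose ansible_host is 10.0.0.7,
+   the section above renders as:
+   [monitoring]
+   mon0    ansible_host=10.0.0.7
+#}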
+
+[monitoring:vars]
+ansible_user=kolla
+ansible_become=true
 
 [storage:children]
 controllers
diff --git a/ansible/snat.yml b/ansible/snat.yml
index 043e2466a0a500fcfabf8aa6d9797e1d05304580..a2f5a2c3c268b9e025703c282b71a320afd39909 100644
--- a/ansible/snat.yml
+++ b/ansible/snat.yml
@@ -2,7 +2,7 @@
 # Enable SNAT using iptables.
 
 - name: Ensure SNAT is configured
-  hosts: seed:controllers
+  hosts: seed:overcloud
   vars:
     snat_rules:
       - interface: "{{ ansible_default_ipv4.interface }}"
diff --git a/ansible/wipe-disks.yml b/ansible/wipe-disks.yml
index 415edb2aaaf9414a11a28c98b1a1b3fc704841e9..c015ef2550ea7add1408936077c490eb23214c88 100644
--- a/ansible/wipe-disks.yml
+++ b/ansible/wipe-disks.yml
@@ -7,6 +7,6 @@
 # any LVM or file system state from them.
 
 - name: Ensure that all unmounted block devices are wiped
-  hosts: seed:controllers
+  hosts: seed:overcloud
   roles:
     - role: wipe-disks
diff --git a/etc/kayobe/inventory/groups b/etc/kayobe/inventory/groups
index 0212af5d460bc6a840a33518b83fed360f6aa0f9..b0251ef341ee8202ced4ed9e2157aeca62898dac 100644
--- a/etc/kayobe/inventory/groups
+++ b/etc/kayobe/inventory/groups
@@ -7,6 +7,13 @@
 [controllers]
 # Empty group to provide declaration of controllers group.
 
+[monitoring]
+# Empty group to provide declaration of monitoring group.
+
+[overcloud:children]
+controllers
+monitoring
+
 [docker:children]
 # Hosts in this group will have Docker installed.
 seed
diff --git a/etc/kayobe/monitoring.yml b/etc/kayobe/monitoring.yml
new file mode 100644
index 0000000000000000000000000000000000000000..d9a69ced34ff9c2d642846833bf2ec39734e1f91
--- /dev/null
+++ b/etc/kayobe/monitoring.yml
@@ -0,0 +1,60 @@
+---
+###############################################################################
+# Monitoring node configuration.
+
+# User with which to access the monitoring nodes via SSH during bootstrap, in
+# order to set up the Kayobe user account.
+#monitoring_bootstrap_user:
+
+###############################################################################
+# Monitoring node BIOS configuration.
+
+# Dict of monitoring node BIOS options. Format is same as that used by
+# stackhpc.drac role.
+#monitoring_bios_config:
+
+# Dict of default monitoring node BIOS options. Format is same as that used by
+# stackhpc.drac role.
+#monitoring_bios_config_default:
+
+# Dict of additional monitoring node BIOS options. Format is same as that used
+# by stackhpc.drac role.
+#monitoring_bios_config_extra:
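+#
+# For example (hypothetical BIOS option name):
+# monitoring_bios_config_extra:
+#   NumLock: "On"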
+
+###############################################################################
+# Monitoring node RAID configuration.
+
+# List of monitoring node RAID volumes. Format is same as that used by
+# stackhpc.drac role.
+#monitoring_raid_config:
+
+# List of default monitoring node RAID volumes. Format is same as that used by
+# stackhpc.drac role.
+#monitoring_raid_config_default:
+
+# List of additional monitoring node RAID volumes. Format is same as that used
+# by stackhpc.drac role.
+#monitoring_raid_config_extra:
+
+###############################################################################
+# Monitoring node LVM configuration.
+
+# List of monitoring node volume groups. See mrlesmithjr.manage-lvm role for
+# format.
+#monitoring_lvm_groups:
+
+# Default list of monitoring node volume groups. See mrlesmithjr.manage-lvm
+# role for format.
+#monitoring_lvm_groups_default:
+
+# Additional list of monitoring node volume groups. See mrlesmithjr.manage-lvm
+# role for format.
+#monitoring_lvm_groups_extra:
+
+###############################################################################
+# Dummy variable to allow Ansible to accept this file.
+workaround_ansible_issue_8743: yes
diff --git a/etc/kayobe/overcloud.yml b/etc/kayobe/overcloud.yml
new file mode 100644
index 0000000000000000000000000000000000000000..29eb8515a11a89c1d5c31b93b6721dfd8e4b1567
--- /dev/null
+++ b/etc/kayobe/overcloud.yml
@@ -0,0 +1,26 @@
+---
+###############################################################################
+# Overcloud configuration.
+
+# Default Ansible group for overcloud hosts.
+#overcloud_group_default:
+
+# List of names of Ansible groups for overcloud hosts.
+#overcloud_groups:
+
+# Dict mapping overcloud Ansible group names to lists of hosts in the group.
+# As a special case, the group 'ignore' can be used to specify hosts that
+# should not be added to the inventory.
+#overcloud_group_hosts_map:
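+
+# For example (hypothetical hostnames):
+#
+# overcloud_group_hosts_map:
+#   monitoring:
+#     - mon0
+#   ignore:
+#     - spare0
+
+###############################################################################
+# Dummy variable to allow Ansible to accept this file.
+workaround_ansible_issue_8743: yes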
diff --git a/kayobe/cli/commands.py b/kayobe/cli/commands.py
index d23bdb369e2d9bcb15c53d75cdd057dc21b46547..2f0b777040f9528c389208ef63995f30a381e1b2 100644
--- a/kayobe/cli/commands.py
+++ b/kayobe/cli/commands.py
@@ -373,7 +373,8 @@ class OvercloudHostConfigure(KollaAnsibleMixin, KayobeAnsibleMixin, VaultMixin,
     def take_action(self, parsed_args):
         self.app.LOG.debug("Configuring overcloud host OS")
         ansible_user = self.run_kayobe_config_dump(
-            parsed_args, host="controllers[0]", var_name="kayobe_ansible_user")
+            parsed_args, var_name="kayobe_ansible_user")
+        ansible_user = list(ansible_user.values())[0]
         playbooks = _build_playbook_list(
             "ip-allocation", "ssh-known-host", "kayobe-ansible-user")
         if parsed_args.wipe_disks:
@@ -381,12 +382,12 @@ class OvercloudHostConfigure(KollaAnsibleMixin, KayobeAnsibleMixin, VaultMixin,
         playbooks += _build_playbook_list(
             "dev-tools", "disable-selinux", "network", "disable-glean", "ntp",
             "lvm")
-        self.run_kayobe_playbooks(parsed_args, playbooks, limit="controllers")
+        self.run_kayobe_playbooks(parsed_args, playbooks, limit="overcloud")
         extra_vars = {"ansible_user": ansible_user}
         self.run_kolla_ansible_overcloud(parsed_args, "bootstrap-servers",
                                          extra_vars=extra_vars)
         playbooks = _build_playbook_list("kolla-host", "docker")
-        self.run_kayobe_playbooks(parsed_args, playbooks, limit="controllers")
+        self.run_kayobe_playbooks(parsed_args, playbooks, limit="overcloud")
 
 
 class OvercloudServiceDeploy(KollaAnsibleMixin, KayobeAnsibleMixin, VaultMixin,