diff --git a/.gitignore b/.gitignore
index 0bc879324ed03bbeb2fdda7a532e21a712d9af44..4983b5c5612a977b0c11724443f1f9dd5f848246 100644
--- a/.gitignore
+++ b/.gitignore
@@ -13,3 +13,13 @@ ansible/*.retry
 # Others
 .DS_Store
 .vimrc
+
+# Ansible Galaxy roles
+ansible/roles/ahuffman.resolv/
+ansible/roles/jriguera.configdrive/
+ansible/roles/MichaelRigart.interfaces/
+ansible/roles/resmo.ntp/
+ansible/roles/yatesr.timezone/
+
+# Virtualenv
+ansible/kolla-venv
diff --git a/PROBLEMS b/PROBLEMS
index a3cfd172cdcb12451256b09c1838c581dfe0c023..f8211bbdb4651a8937dfb1d314099055b73ec436 100644
--- a/PROBLEMS
+++ b/PROBLEMS
@@ -4,6 +4,8 @@ ansible PTY allocation request failed
 In /var/log/secure: Unable to open pty: No such file or directory
 none            /dev/pts        devpts        gid=5,mode=620    0 0
 
+Seems to occur when using docker cp.
+
 Kolla patches
 =============
 
@@ -13,3 +15,38 @@ Ironic inspector
 ================
 
 Failed to start due to iptables error.
+See https://bugs.launchpad.net/kolla/+bug/1624457.
+
+Bare metal provisioning
+=======================
+
+- Neutron external network needs configuring and an IP address.
+    - install bridge-utils
+    - create br-eth0 with an IP, leave eth0 without an IP
+    - create a veth pair and bring both ends up
+    - plug one end into br-eth0
+    - set neutron_external_interface=patch-br-ex in globals.yml
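+    - rough command sequence (untested sketch; interface names illustrative):
+      brctl addbr br-eth0 && brctl addif br-eth0 eth0
+      ip addr add <ip>/<prefix> dev br-eth0
+      ip link add patch-br-eth0 type veth peer name patch-br-ex
+      ip link set patch-br-eth0 up && ip link set patch-br-ex up
+      brctl addif br-eth0 patch-br-eth0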
+
+- Provisioning network different from API network.
+  Likely we can use the same network for this in future.
+    - added to /etc/kolla/ironic-conductor/ironic.conf:
+      [DEFAULT]
+      api_url=http://<provision_ip>:6385
+      [pxe]
+      tftp_server=<provision_ip>
+
+    - add to /etc/kolla/haproxy/haproxy.cfg:
+      listen ironic_pxe_api
+      bind 10.122.100.252:6385
+      server stg-alaska 10.121.100.252:6385 check inter 2000 rise 2 fall 5
+
+- iscsi_tcp modprobe required
+
+- ironic.conf: [agent]deploy_logs_local_path=/var/log/kolla/ironic/deploy
+
+Bifrost
+=======
+
+- Set log_dir=/var/log/kolla/ironic in ironic.conf
+- Create kolla_logs/ironic, chown ironic:ironic
+- os_ironic module will not access the root_device property.
diff --git a/Vagrantfile b/Vagrantfile
new file mode 100644
index 0000000000000000000000000000000000000000..113f47253d241de555b5511a121f2e3a862c0c31
--- /dev/null
+++ b/Vagrantfile
@@ -0,0 +1,71 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+# All Vagrant configuration is done below. The "2" in Vagrant.configure
+# configures the configuration version (we support older styles for
+# backwards compatibility). Please don't change it unless you know what
+# you're doing.
+Vagrant.configure("2") do |config|
+  # The most common configuration options are documented and commented below.
+  # For a complete reference, please see the online documentation at
+  # https://docs.vagrantup.com.
+
+  # Every Vagrant development environment requires a box. You can search for
+  # boxes at https://atlas.hashicorp.com/search.
+  config.vm.box = "centos/7"
+
+  # Disable automatic box update checking. If you disable this, then
+  # boxes will only be checked for updates when the user runs
+  # `vagrant box outdated`. This is not recommended.
+  # config.vm.box_check_update = false
+
+  # Create a forwarded port mapping which allows access to a specific port
+  # within the machine from a port on the host machine. In the example below,
+  # accessing "localhost:8080" will access port 80 on the guest machine.
+  # config.vm.network "forwarded_port", guest: 80, host: 8080
+
+  # Create a private network, which allows host-only access to the machine
+  # using a specific IP.
+  # config.vm.network "private_network", ip: "192.168.33.10"
+
+  # Create a public network, which generally maps to a bridged network.
+  # Bridged networks make the machine appear as another physical device on
+  # your network.
+  # config.vm.network "public_network"
+
+  # Share an additional folder to the guest VM. The first argument is
+  # the path on the host to the actual folder. The second argument is
+  # the path on the guest to mount the folder. And the optional third
+  # argument is a set of non-required options.
+  # config.vm.synced_folder "../data", "/vagrant_data"
+
+  # Provider-specific configuration so you can fine-tune various
+  # backing providers for Vagrant. These expose provider-specific options.
+  # Example for VirtualBox:
+  #
+  config.vm.provider "virtualbox" do |vb|
+    # Display the VirtualBox GUI when booting the machine
+    #vb.gui = true
+  
+    # Customize the amount of memory on the VM:
+    vb.memory = "4096"
+  end
+  #
+  # View the documentation for the provider you are using for more
+  # information on available options.
+
+  # Define a Vagrant Push strategy for pushing to Atlas. Other push strategies
+  # such as FTP and Heroku are also available. See the documentation at
+  # https://docs.vagrantup.com/v2/push/atlas.html for more information.
+  # config.push.define "atlas" do |push|
+  #   push.app = "YOUR_ATLAS_USERNAME/YOUR_APPLICATION_NAME"
+  # end
+
+  # Enable provisioning with a shell script. Additional provisioners such as
+  # Puppet, Chef, Ansible, Salt, and Docker are also available. Please see the
+  # documentation for more information about their specific syntax and use.
+  # config.vm.provision "shell", inline: <<-SHELL
+  #   apt-get update
+  #   apt-get install -y apache2
+  # SHELL
+end
diff --git a/ansible/bootstrap.yml b/ansible/bootstrap.yml
new file mode 100644
index 0000000000000000000000000000000000000000..8e9165d8497f7c82219ca4c2d9f52b2962252936
--- /dev/null
+++ b/ansible/bootstrap.yml
@@ -0,0 +1,5 @@
+---
+- name: Ensure configuration management host is bootstrapped
+  hosts: config-mgmt
+  roles:
+    - role: bootstrap
diff --git a/ansible/disable-selinux.yml b/ansible/disable-selinux.yml
new file mode 100644
index 0000000000000000000000000000000000000000..5a1e93987d4fdfa06aedd22d7da6f2e204ea6b09
--- /dev/null
+++ b/ansible/disable-selinux.yml
@@ -0,0 +1,5 @@
+---
+- name: Disable SELinux and reboot if required
+  hosts: controllers:seed
+  roles:
+    - role: disable-selinux
diff --git a/ansible/docker.yml b/ansible/docker.yml
new file mode 100644
index 0000000000000000000000000000000000000000..698896042ec5f93cc3a4a9354eba9e36866ce7c5
--- /dev/null
+++ b/ansible/docker.yml
@@ -0,0 +1,5 @@
+---
+- name: Ensure docker is configured
+  hosts: docker
+  roles:
+    - role: docker
diff --git a/ansible/dump-config.yml b/ansible/dump-config.yml
new file mode 100644
index 0000000000000000000000000000000000000000..3443deb4be3496379b2c5ded972374878aa143df
--- /dev/null
+++ b/ansible/dump-config.yml
@@ -0,0 +1,22 @@
+---
+- hosts: all
+  gather_facts: "{{ gather_facts | default(False) }}"
+  vars:
+    dump_config_path: /tmp/kayobe-dump-config
+  tasks:
+    - name: Create configuration dump directory
+      file:
+        path: "{{ dump_config_path }}"
+        state: directory
+
+    - name: Write host config to file
+      local_action:
+        module: copy
+        content: "{{ hostvars[inventory_hostname] | to_nice_yaml }}"
+        dest: "{{ dump_config_path }}/{{ inventory_hostname }}.yml"
+
+    - name: Write merged config to file
+      local_action:
+        module: copy
+        content: "{{ hostvars | merge_config | to_nice_yaml }}"
+        dest: "{{ dump_config_path }}/merged.yml
diff --git a/ansible/filter_plugins/networks.py b/ansible/filter_plugins/networks.py
new file mode 100644
index 0000000000000000000000000000000000000000..1255f258400755d98d27808a63d04e3e3d0d4b4b
--- /dev/null
+++ b/ansible/filter_plugins/networks.py
@@ -0,0 +1,196 @@
+from ansible import errors
+import jinja2
+import netaddr
+
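+# These filters look up per-network attributes from host variables named
+# "<network>_<attribute>", e.g. provision_oc_net_cidr or internal_net_vip_address.
+# Example usage in a playbook or template (network names are variables defined
+# elsewhere in the kayobe configuration):
+#   {{ provision_oc_net_name | net_ip }}
+#   {{ network_interfaces | net_select_bridges | map('net_bridge_obj') | list }}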
+
+def _get_hostvar(context, var_name, inventory_hostname=None):
+    if inventory_hostname is None:
+        namespace = context
+    else:
+        if inventory_hostname not in context['hostvars']:
+            raise errors.AnsibleFilterError(
+                "Inventory hostname '%s' not in hostvars" % inventory_hostname)
+        namespace = context["hostvars"][inventory_hostname]
+    return namespace.get(var_name)
+
+
+@jinja2.contextfilter
+def net_attr(context, name, attr, inventory_hostname=None):
+    var_name = "%s_%s" % (name, attr)
+    return _get_hostvar(context, var_name, inventory_hostname)
+
+
+def _make_attr_filter(attr):
+    @jinja2.contextfilter
+    def func(context, name, inventory_hostname=None):
+        return net_attr(context, name, attr, inventory_hostname)
+    return func
+
+
+@jinja2.contextfilter
+def net_vip_address(context, name, inventory_hostname=None):
+    return net_attr(context, name, 'vip_address', inventory_hostname)
+
+
+@jinja2.contextfilter
+def net_ip(context, name, inventory_hostname=None):
+    ips = net_attr(context, name, 'ips', inventory_hostname)
+    if ips:
+        if inventory_hostname is None:
+            inventory_hostname = _get_hostvar(context, "inventory_hostname")
+        return ips.get(inventory_hostname)
+
+
+@jinja2.contextfilter
+def net_interface(context, name, inventory_hostname=None):
+    return net_attr(context, name, 'interface', inventory_hostname)
+
+
+@jinja2.contextfilter
+def net_cidr(context, name, inventory_hostname=None):
+    return net_attr(context, name, 'cidr', inventory_hostname)
+
+
+@jinja2.contextfilter
+def net_gateway(context, name, inventory_hostname=None):
+    return net_attr(context, name, 'gateway', inventory_hostname)
+
+
+@jinja2.contextfilter
+def net_allocation_pool_start(context, name, inventory_hostname=None):
+    return net_attr(context, name, 'allocation_pool_start', inventory_hostname)
+
+
+@jinja2.contextfilter
+def net_allocation_pool_end(context, name, inventory_hostname=None):
+    return net_attr(context, name, 'allocation_pool_end', inventory_hostname)
+
+
+@jinja2.contextfilter
+def net_vlan(context, name, inventory_hostname=None):
+    return net_attr(context, name, 'vlan', inventory_hostname)
+
+
+@jinja2.contextfilter
+def net_bridge_ports(context, name, inventory_hostname=None):
+    return net_attr(context, name, 'bridge_ports', inventory_hostname)
+
+
+@jinja2.contextfilter
+def net_interface_obj(context, name, inventory_hostname=None):
+    device = net_interface(context, name, inventory_hostname)
+    if not device:
+        raise errors.AnsibleFilterError(
+            "Network interface for network '%s' on host '%s' not found" %
+            (name, inventory_hostname))
+    ip = net_ip(context, name, inventory_hostname)
+    cidr = net_cidr(context, name, inventory_hostname)
+    netmask = str(netaddr.IPNetwork(cidr).netmask)
+    gateway = net_gateway(context, name, inventory_hostname)
+    vlan = net_vlan(context, name, inventory_hostname)
+    interface = {
+        'device': device,
+        'address': ip,
+        'netmask': netmask,
+        'gateway': gateway,
+        'vlan': vlan,
+        'bootproto': 'static',
+        'onboot': 'yes',
+    }
+    interface = {k: v for k, v in interface.items() if v is not None}
+    return interface
+
+
+@jinja2.contextfilter
+def net_bridge_obj(context, name, inventory_hostname=None):
+    device = net_interface(context, name, inventory_hostname)
+    if not device:
+        raise errors.AnsibleFilterError(
+            "Network interface for network '%s' on host '%s' not found" %
+            (name, inventory_hostname))
+    ip = net_ip(context, name, inventory_hostname)
+    cidr = net_cidr(context, name, inventory_hostname)
+    netmask = str(netaddr.IPNetwork(cidr).netmask)
+    gateway = net_gateway(context, name, inventory_hostname)
+    vlan = net_vlan(context, name, inventory_hostname)
+    ports = net_bridge_ports(context, name, inventory_hostname)
+    interface = {
+        'device': device,
+        'address': ip,
+        'netmask': netmask,
+        'gateway': gateway,
+        'vlan': vlan,
+        'ports': ports,
+        'bootproto': 'static',
+        'onboot': 'yes',
+    }
+    interface = {k: v for k, v in interface.items() if v is not None}
+    return interface
+
+
+@jinja2.contextfilter
+def net_is_ether(context, name, inventory_hostname=None):
+    return net_bridge_ports(context, name, inventory_hostname) is None
+
+
+@jinja2.contextfilter
+def net_is_bridge(context, name, inventory_hostname=None):
+    return net_bridge_ports(context, name, inventory_hostname) is not None
+
+
+@jinja2.contextfilter
+def net_select_ethers(context, names):
+    return [name for name in names if net_is_ether(context, name)]
+
+
+@jinja2.contextfilter
+def net_select_bridges(context, names):
+    return [name for name in names if net_is_bridge(context, name)]
+
+
+@jinja2.contextfilter
+def net_configdrive_network_device(context, name, inventory_hostname=None):
+    device = net_interface(context, name, inventory_hostname)
+    if not device:
+        raise errors.AnsibleFilterError(
+            "Network interface for network '%s' on host '%s' not found" %
+            (name, inventory_hostname))
+    ip = net_ip(context, name, inventory_hostname)
+    cidr = net_cidr(context, name, inventory_hostname)
+    netmask = str(netaddr.IPNetwork(cidr).netmask) if cidr is not None else None
+    gateway = net_gateway(context, name, inventory_hostname)
+    bootproto = 'static' if ip is not None else 'dhcp'
+    interface = {
+        'device': device,
+        'address': ip,
+        'netmask': netmask,
+        'gateway': gateway,
+        'bootproto': bootproto,
+    }
+    interface = {k: v for k, v in interface.items() if v is not None}
+    return interface
+
+
+class FilterModule(object):
+    """Networking filters."""
+
+    def filters(self):
+        return {
+            'net_attr': net_attr,
+            'net_vip_address': net_vip_address,
+            'net_fqdn': _make_attr_filter('fqdn'),
+            'net_ip': net_ip,
+            'net_interface': net_interface,
+            'net_cidr': net_cidr,
+            'net_gateway': net_gateway,
+            'net_allocation_pool_start': net_allocation_pool_start,
+            'net_allocation_pool_end': net_allocation_pool_end,
+            'net_vlan': net_vlan,
+            'net_interface_obj': net_interface_obj,
+            'net_bridge_obj': net_bridge_obj,
+            'net_is_ether': net_is_ether,
+            'net_is_bridge': net_is_bridge,
+            'net_select_ethers': net_select_ethers,
+            'net_select_bridges': net_select_bridges,
+            'net_configdrive_network_device': net_configdrive_network_device,
+        }
diff --git a/ansible/group_vars/all/bifrost b/ansible/group_vars/all/bifrost
new file mode 100644
index 0000000000000000000000000000000000000000..9a5d4c1b845a428a5230a68dd3153d9eeecb39cb
--- /dev/null
+++ b/ansible/group_vars/all/bifrost
@@ -0,0 +1,35 @@
+---
+# Kayobe configuration for Bifrost.
+
+###############################################################################
+# Diskimage-builder configuration.
+
+# DIB base OS element.
+kolla_bifrost_dib_os_element: "centos7"
+
+# List of DIB elements.
+kolla_bifrost_dib_elements:
+  - "serial-console"
+  - "vm"
+
+# DIB init element.
+kolla_bifrost_dib_init_element: "cloud-init-datasources"
+
+# DIB environment variables.
+kolla_bifrost_dib_env_vars:
+  DIB_CLOUD_INIT_DATASOURCES: "ConfigDrive"
+
+# List of DIB packages to install.
+kolla_bifrost_dib_packages: []
+
+###############################################################################
+# Ironic configuration.
+
+# Whether to enable ipmitool-based drivers.
+kolla_bifrost_enable_ipmitool_drivers: true
+
+###############################################################################
+# Inventory configuration.
+
+# Server inventory for Bifrost.
+kolla_bifrost_servers: {}
diff --git a/ansible/group_vars/all/dns b/ansible/group_vars/all/dns
new file mode 100644
index 0000000000000000000000000000000000000000..0c26c62784cb747ff51659e7cc1825f0f3cf8f9f
--- /dev/null
+++ b/ansible/group_vars/all/dns
@@ -0,0 +1,21 @@
+---
+###############################################################################
+# DNS.
+
+# List of DNS nameservers.
+resolv_nameservers:
+  - 8.8.8.8
+  - 8.8.4.4
+
+# DNS domain suffix.
+#resolv_domain:
+
+# List of DNS search suffixes.
+#resolv_search:
+
+# List of IP address and netmask pairs to sort addresses returned by
+# gethostbyname.
+#resolv_sortlist:
+
+# List of DNS options.
+#resolv_options:
diff --git a/ansible/group_vars/all/globals b/ansible/group_vars/all/globals
new file mode 100644
index 0000000000000000000000000000000000000000..4110806781af10bbae40f05f67497071b189aa4b
--- /dev/null
+++ b/ansible/group_vars/all/globals
@@ -0,0 +1,11 @@
+---
+# Kayobe global configuration.
+
+###############################################################################
+# Miscellaneous configuration.
+
+# Path to Kayobe configuration directory.
+kayobe_config_path: "{{ lookup('env', 'KAYOBE_CONFIG_PATH') | default('/etc/kayobe') }}"
+
+# Path in which to cache downloaded images.
+image_cache_path: "{{ ansible_user_dir ~ '/kayobe-image-cache' }}"
diff --git a/ansible/group_vars/all/kolla b/ansible/group_vars/all/kolla
new file mode 100644
index 0000000000000000000000000000000000000000..7b730e1dc64625cb2d5e454a14d9e203153ab6a8
--- /dev/null
+++ b/ansible/group_vars/all/kolla
@@ -0,0 +1,34 @@
+---
+###############################################################################
+# Kolla configuration.
+
+# Path to Kolla configuration directory.
+kolla_config_path: "{{ lookup('env', 'KOLLA_CONFIG_PATH') | default('/etc/kolla') }}"
+
+# Path to Kolla node custom configuration directory.
+kolla_node_custom_config_path: "{{ kolla_config_path }}/config"
+
+# Kolla base container image distribution.
+kolla_base_distro: "centos"
+
+# Kolla installation type: binary or source.
+kolla_install_type: "binary"
+
+# Kolla OpenStack release version. This should be a Docker image tag.
+kolla_openstack_release: "3.0.2"
+
+# Whether TLS is enabled for the external API endpoints.
+kolla_enable_tls_external: "no"
+
+# Path to external API certificate.
+kolla_external_fqdn_cert:
+
+# Whether debug logging is enabled.
+kolla_openstack_logging_debug: "False"
+
+###############################################################################
+# Kolla feature flag configuration.
+
+kolla_enable_glance: "yes"
+kolla_enable_ironic: "yes"
+kolla_enable_swift: "yes"
diff --git a/ansible/group_vars/all/network b/ansible/group_vars/all/network
new file mode 100644
index 0000000000000000000000000000000000000000..d9bec9179a917168e6ee9d5f08f0e0bea830005c
--- /dev/null
+++ b/ansible/group_vars/all/network
@@ -0,0 +1,10 @@
+---
+###############################################################################
+# Network roles.
+
+# Network role to network name mappings.
+provision_oc_net_name: 'provision_oc_net'
+provision_wl_net_name: 'provision_wl_net'
+internal_net_name: 'internal_net'
+external_net_name: 'external_net'
+storage_net_name: 'storage_net'
+storage_mgmt_net_name: 'storage_mgmt_net'
diff --git a/ansible/group_vars/all/ntp b/ansible/group_vars/all/ntp
new file mode 100644
index 0000000000000000000000000000000000000000..24bfafe07033560675a74afbf61de01dd4268cb0
--- /dev/null
+++ b/ansible/group_vars/all/ntp
@@ -0,0 +1,34 @@
+---
+# Kayobe NTP configuration.
+
+###############################################################################
+# Timezone.
+
+# Name of the local timezone.
+timezone: "{{ ansible_date_time.tz }}"
+
+###############################################################################
+# Network Time Protocol (NTP).
+
+# List of names of NTP servers.
+#ntp_config_server:
+
+# List of NTP restrictions to add to ntp.conf.
+#ntp_config_restrict:
+
+# List of addresses for NTP daemon to listen on.
+#ntp_config_listen:
+
+# Other NTP configuration options.
+#ntp_config_filegen:
+#ntp_config_statistics:
+#ntp_config_crypto:
+#ntp_config_includefile:
+#ntp_config_keys:
+#ntp_config_trustedkey:
+#ntp_config_requestkey:
+#ntp_config_controlkey:
+#ntp_config_broadcast:
+#ntp_config_broadcastclient:
+#ntp_config_multicastclient:
+#ntp_config_tinker_panic_enabled:
diff --git a/ansible/group_vars/controllers/network b/ansible/group_vars/controllers/network
new file mode 100644
index 0000000000000000000000000000000000000000..759490f6636ea674f27b8316ca98957335d4d099
--- /dev/null
+++ b/ansible/group_vars/controllers/network
@@ -0,0 +1,29 @@
+---
+###############################################################################
+# Network interface attachments.
+
+# List of networks to which these nodes are attached.
+network_interfaces: >
+  {{ (controller_default_network_interfaces +
+      controller_extra_network_interfaces) | unique | list }}
+
+# List of default networks to which controller nodes are attached.
+controller_default_network_interfaces: >
+  {{ [provision_oc_net_name,
+      provision_wl_net_name,
+      internal_net_name,
+      external_net_name,
+      storage_net_name,
+      storage_mgmt_net_name] | unique | list }}
+
+# List of extra networks to which controller nodes are attached.
+controller_extra_network_interfaces: []
+
+###############################################################################
+# Kolla networking.
+
+# Name of the Neutron OVS bridge for the provisioning network.
+neutron_bridge_name: "br-ex"
+
+# External network interface for Neutron.
+neutron_external_interface: "{{ 'patch-' ~ neutron_bridge_name }}"
diff --git a/ansible/group_vars/controllers/swift b/ansible/group_vars/controllers/swift
new file mode 100644
index 0000000000000000000000000000000000000000..e80e6ab1ef2bfbc0c41424d696ca877d51b5c1db
--- /dev/null
+++ b/ansible/group_vars/controllers/swift
@@ -0,0 +1,16 @@
+---
+###############################################################################
+# OpenStack Swift configuration.
+
+# Base-2 logarithm of the number of partitions.
+# i.e. num_partitions=2^<swift_part_power>.
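+# For example, swift_part_power of 10 gives 2^10 = 1024 partitions.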
+swift_part_power: 10
+
+# Object replication count.
+swift_replication_count: "{{ [groups['controllers'] | length, 3] | min }}"
+
+# Minimum time in hours between moving a given partition.
+swift_min_part_hours: 1
+
+# Number of Swift Zones.
+swift_num_zones: 5
diff --git a/ansible/group_vars/seed/network b/ansible/group_vars/seed/network
new file mode 100644
index 0000000000000000000000000000000000000000..379999115a5876339b96ccf04381d10042080ca4
--- /dev/null
+++ b/ansible/group_vars/seed/network
@@ -0,0 +1,42 @@
+---
+###############################################################################
+# Network interface attachments.
+
+# List of networks to which these nodes are attached.
+network_interfaces: >
+  {{ (seed_default_network_interfaces +
+      seed_extra_network_interfaces) | unique | list }}
+
+# List of default networks to which seed nodes are attached.
+seed_default_network_interfaces: >
+  {{ [provision_oc_net_name] | unique | list }}
+
+# List of extra networks to which seed nodes are attached.
+seed_extra_network_interfaces: []
+
+###############################################################################
+# Network interface definitions.
+
+# Overcloud provisioning network IP information.
+# provision_oc_net_interface:
+# provision_oc_net_bridge_ports:
+
+# Workload provisioning network IP information.
+# provision_wl_net_interface:
+# provision_wl_net_bridge_ports:
+
+# Internal network IP information.
+# internal_net_interface:
+# internal_net_bridge_ports:
+
+# External network IP information.
+# external_net_interface:
+# external_net_bridge_ports:
+
+# Storage network IP information.
+# storage_net_interface:
+# storage_net_bridge_ports:
+
+# Storage management network IP information.
+# storage_mgmt_net_interface:
+# storage_mgmt_net_bridge_ports:
diff --git a/ansible/group_vars/seed/vm b/ansible/group_vars/seed/vm
new file mode 100644
index 0000000000000000000000000000000000000000..c3098816a0d7f750870a35257ef752d7169867b8
--- /dev/null
+++ b/ansible/group_vars/seed/vm
@@ -0,0 +1,54 @@
+---
+###############################################################################
+# Seed node VM configuration.
+
+# Name of the seed VM.
+seed_vm_name: "{{ inventory_hostname }}"
+
+# Memory in MB.
+seed_vm_memory_mb: "{{ 16 * 1024 }}"
+
+# Number of vCPUs.
+seed_vm_vcpus: 4
+
+# List of volumes.
+seed_vm_volumes:
+  - "{{ seed_vm_root_volume }}"
+  - "{{ seed_vm_data_volume }}"
+
+# Root volume.
+seed_vm_root_volume:
+  name: "{{ seed_vm_name }}-root"
+  pool: "{{ seed_vm_pool }}"
+  capacity: "{{ seed_vm_root_capacity }}"
+  format: "{{ seed_vm_root_format }}"
+  image: "{{ seed_vm_root_image }}"
+
+# Data volume.
+seed_vm_data_volume:
+  name: "{{ seed_vm_name }}-data"
+  pool: "{{ seed_vm_pool }}"
+  capacity: "{{ seed_vm_data_capacity }}"
+  format: "{{ seed_vm_data_format }}"
+
+# List of network interfaces.
+seed_vm_interfaces:
+  - network: default
+
+# Name of the storage pool for the seed VM volumes.
+seed_vm_pool: default
+
+# Capacity of the seed VM root volume.
+seed_vm_root_capacity: 50G
+
+# Format of the seed VM root volume.
+seed_vm_root_format: qcow2
+
+# Base image for the seed VM root volume.
+seed_vm_root_image:
+
+# Capacity of the seed VM data volume.
+seed_vm_data_capacity: 100G
+
+# Format of the seed VM data volume.
+seed_vm_data_format: qcow2
diff --git a/ansible/ip-allocation.yml b/ansible/ip-allocation.yml
new file mode 100644
index 0000000000000000000000000000000000000000..57aa07f7d9e22681a8837f07002cfc51e1d5ae1b
--- /dev/null
+++ b/ansible/ip-allocation.yml
@@ -0,0 +1,12 @@
+---
+- name: Ensure IP addresses are allocated
+  hosts: controllers
+  gather_facts: no
+  pre_tasks:
+    - set_fact:
+        ip_allocations: "{{ ip_allocations|default([]) + [{'net_name': item, 'cidr': item|net_cidr}] }}"
+      with_items: "{{ network_interfaces }}"
+  roles:
+    - role: ip-allocation
+      ip_allocation_filename: "{{ kayobe_config_path }}/network-allocation.yml"
+      ip_allocation_hostname: "{{ inventory_hostname }}"
diff --git a/ansible/kolla-bifrost.yml b/ansible/kolla-bifrost.yml
new file mode 100644
index 0000000000000000000000000000000000000000..17c5ff9920ef08e6b354b3876059a6afe7aa82ee
--- /dev/null
+++ b/ansible/kolla-bifrost.yml
@@ -0,0 +1,33 @@
+---
+- name: Ensure Kolla Bifrost is configured
+  hosts: config-mgmt
+  vars:
+    kolla_bifrost_extra_globals_path: "{{ kayobe_config_path ~ '/kolla/config/bifrost/bifrost.yml' }}"
+    kolla_bifrost_driver_map:
+      - { name: agent_ipmitool, enabled: "{{ kolla_bifrost_enable_ipmitool_drivers | bool }}" }
+
+  pre_tasks:
+    - name: Check whether a Kolla Bifrost extra globals configuration file exists
+      stat:
+        path: "{{ kolla_bifrost_extra_globals_path }}"
+      register: globals_stat
+
+    - name: Read the Kolla Bifrost extra globals configuration file
+      set_fact:
+        kolla_bifrost_extra_globals: "{{ lookup('template', kolla_bifrost_extra_globals_path) | from_yaml }}"
+      when: globals_stat.stat.exists
+
+  roles:
+    - role: kolla-bifrost
+
+      # Generate a list of enabled drivers from the map.
+      kolla_bifrost_enabled_drivers: >
+        {{ kolla_bifrost_driver_map | selectattr('enabled') | map(attribute='name') | list }}
+      kolla_bifrost_enable_pxe_drivers: false
+
+      # Network configuration.
+      kolla_bifrost_dhcp_pool_start: "{{ provision_oc_net_name | net_allocation_pool_start }}"
+      kolla_bifrost_dhcp_pool_end: "{{ provision_oc_net_name | net_allocation_pool_end }}"
+      kolla_bifrost_dnsmasq_router: "{{ provision_oc_net_name | net_gateway }}"
+      kolla_bifrost_dnsmasq_dns_servers: "{{ resolv_nameservers | default([]) }}"
+      kolla_bifrost_domain: "{{ resolv_domain | default }}"
diff --git a/ansible/kolla-host.yml b/ansible/kolla-host.yml
new file mode 100644
index 0000000000000000000000000000000000000000..4ab8ad41f151fce21a4add893373cd6c49c3901d
--- /dev/null
+++ b/ansible/kolla-host.yml
@@ -0,0 +1,6 @@
+---
+- name: Ensure Kolla hosts are configured
+  hosts: seed:controllers
+  roles:
+    - role: kolla-host
+      kolla_authorized_key: "{{ lookup('file', lookup('env', 'HOME') ~ '/.ssh/id_rsa.pub') }}"
diff --git a/ansible/kolla-openstack.yml b/ansible/kolla-openstack.yml
new file mode 100644
index 0000000000000000000000000000000000000000..4976df89f65d5994445b640c5a0bc06159dbf66e
--- /dev/null
+++ b/ansible/kolla-openstack.yml
@@ -0,0 +1,26 @@
+---
+- name: Ensure Kolla OpenStack components are configured
+  hosts: config-mgmt
+  pre_tasks:
+    - name: Check whether Kolla extra Glance configuration file exists
+      stat:
+        path: "{{ kayobe_config_path }}/kolla/config/glance.conf"
+      register: glance_stat
+
+    - name: Read the Kolla extra Glance configuration file
+      set_fact:
+        kolla_extra_glance: "{{ lookup('template', '{{ kayobe_config_path }}/kolla/config/glance.conf') }}"
+      when: glance_stat.stat.exists
+
+    - name: Check whether Kolla extra Ironic configuration file exists
+      stat:
+        path: "{{ kayobe_config_path }}/kolla/config/ironic.conf"
+      register: ironic_stat
+
+    - name: Read the Kolla extra Ironic configuration file
+      set_fact:
+        kolla_extra_ironic: "{{ lookup('template', '{{ kayobe_config_path }}/kolla/config/ironic.conf') }}"
+      when: ironic_stat.stat.exists
+
+  roles:
+    - role: kolla-openstack
diff --git a/ansible/kolla.yml b/ansible/kolla.yml
new file mode 100644
index 0000000000000000000000000000000000000000..69423e1b098fc5997270d6dace9d5f501baba0a6
--- /dev/null
+++ b/ansible/kolla.yml
@@ -0,0 +1,35 @@
+---
+- name: Ensure Kolla is configured
+  hosts: config-mgmt
+  vars:
+    controller_host: "{{ groups['controllers'][0] | default() }}"
+    seed_host: "{{ groups['seed'][0] | default() }}"
+  pre_tasks:
+    - name: Check whether a Kolla extra globals configuration file exists
+      stat:
+        path: "{{ kayobe_config_path ~ '/kolla/globals.yml' }}"
+      register: globals_stat
+
+    - name: Read the Kolla extra globals configuration file
+      set_fact:
+        kolla_extra_globals: "{{ lookup('template', kayobe_config_path ~ '/kolla/globals.yml') | from_yaml }}"
+      when: globals_stat.stat.exists
+  roles:
+    - role: kolla
+
+    - role: kolla-build
+
+    - role: kolla-ansible
+      kolla_internal_vip_address: "{{ internal_net_name | net_vip_address }}"
+      kolla_internal_fqdn: "{{ internal_net_name | net_fqdn or kolla_internal_vip_address }}"
+      kolla_external_vip_address: "{{ external_net_name | net_vip_address }}"
+      kolla_external_fqdn: "{{ external_net_name | net_fqdn or kolla_external_vip_address }}"
+      kolla_network_interface: "{% if controller_host %}{{ internal_net_name | net_interface(controller_host) | replace('-', '_') }}{% endif %}"
+      kolla_external_vip_interface: "{% if controller_host %}{{ external_net_name | net_interface(controller_host) | replace('-', '_') }}{% endif %}"
+      kolla_api_interface: "{% if controller_host %}{{ internal_net_name | net_interface(controller_host) | replace('-', '_') }}{% endif %}"
+      kolla_storage_interface: "{% if controller_host %}{{ storage_net_name | net_interface(controller_host) | replace('-', '_') }}{% endif %}"
+      kolla_cluster_interface: "{% if controller_host %}{{ storage_mgmt_net_name | net_interface(controller_host) | replace('-', '_') }}{% endif %}"
+      kolla_neutron_bridge_name: "{% if controller_host %}{{ hostvars[controller_host]['neutron_bridge_name'] }}{% endif %}"
+      kolla_neutron_external_interface: "{% if controller_host %}{{ hostvars[controller_host]['neutron_external_interface'] }}{% endif %}"
+      kolla_bifrost_network_interface: "{% if seed_host %}{{ provision_oc_net_name | net_interface(seed_host) | replace('-', '_') }}{% endif %}"
+      kolla_provision_interface: "{% if controller_host %}{{ provision_wl_net_name | net_interface(controller_host) | replace('-', '_') }}{% endif %}"
diff --git a/ansible/network.yml b/ansible/network.yml
new file mode 100644
index 0000000000000000000000000000000000000000..3847e7d50f8dc3d8eaf790402c821a6ebb419618
--- /dev/null
+++ b/ansible/network.yml
@@ -0,0 +1,47 @@
+---
+- name: Ensure networking is configured
+  hosts: seed:controllers
+  pre_tasks:
+    - name: Ensure NetworkManager is disabled
+      service:
+        name: NetworkManager
+        state: stopped
+        enabled: no
+      become: True
+      register: nm_result
+      failed_when:
+        - "{{ nm_result | failed }}"
+        # Ugh, Ansible's service module doesn't handle uninstalled services.
+        - "{{ 'Could not find the requested service' not in nm_result.msg }}"
+
+  roles:
+    - role: ahuffman.resolv
+      become: True
+
+    - role: MichaelRigart.interfaces
+      interfaces_ether_interfaces: >
+        {{ network_interfaces |
+           net_select_ethers |
+           map('net_interface_obj') |
+           list }}
+      interfaces_bridge_interfaces: >
+        {{ network_interfaces |
+           net_select_bridges |
+           map('net_bridge_obj') |
+           list }}
+      become: True
+
+- name: Ensure controller workload provisioning network is configured
+  hosts: controllers
+  roles:
+    # Configure a virtual ethernet patch link to connect the workload provision
+    # network bridge to the Neutron OVS bridge.
+    - role: veth
+      veth_interfaces:
+        - device: "{{ 'patch-' ~ provision_wl_net_name | net_interface }}"
+          bootproto: "static"
+          bridge: "{{ provision_wl_net_name | net_interface }}"
+          peer_device: "{{ 'patch-' ~ neutron_bridge_name }}"
+          peer_bootproto: "static"
+          onboot: yes
+      when: "{{ provision_wl_net_name in network_interfaces }}"
diff --git a/ansible/ntp.yml b/ansible/ntp.yml
new file mode 100644
index 0000000000000000000000000000000000000000..c8f923a9274063a08727c74a7c88339afbf05857
--- /dev/null
+++ b/ansible/ntp.yml
@@ -0,0 +1,8 @@
+---
+- name: Ensure NTP is installed and configured
+  hosts: seed:controllers
+  roles:
+    - role: yatesr.timezone
+      become: True
+    - role: resmo.ntp
+      become: True
diff --git a/ansible/requirements.yml b/ansible/requirements.yml
new file mode 100644
index 0000000000000000000000000000000000000000..d5660ac10a7a121ef871da3b3ff9e69bdc0edde8
--- /dev/null
+++ b/ansible/requirements.yml
@@ -0,0 +1,6 @@
+---
+- src: ahuffman.resolv
+- src: jriguera.configdrive
+- src: MichaelRigart.interfaces
+- src: resmo.ntp
+- src: yatesr.timezone
diff --git a/ansible/roles/bootstrap/tasks/main.yml b/ansible/roles/bootstrap/tasks/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..e297fd57213779afa2097453f67c4aae2c0f8454
--- /dev/null
+++ b/ansible/roles/bootstrap/tasks/main.yml
@@ -0,0 +1,39 @@
+---
+- name: Ensure required packages are installed
+  yum:
+    name: "{{ item }}"
+    state: installed
+  become: True
+  with_items:
+    - git
+    - vim
+
+- name: Check whether an SSH key exists
+  stat:
+    path: "{{ ansible_user_dir }}/.ssh/id_rsa"
+  register: ssh_key_stat
+
+- name: Generate an SSH key
+  command: ssh-keygen -t rsa -N '' -f {{ ansible_user_dir }}/.ssh/id_rsa
+  when: not ssh_key_stat.stat.exists
+
+- name: Ensure SSH public key is in authorized keys
+  authorized_key:
+    user: "{{ ansible_user_id }}"
+    key: "{{ lookup('file', ansible_user_dir ~ '/.ssh/id_rsa.pub') }}"
+
+- name: Scan for SSH keys
+  command: ssh-keyscan {{ item }}
+  with_items:
+    - localhost
+    - 127.0.0.1
+  register: keyscan_result
+  changed_when: False
+
+- name: Ensure SSH keys are in known hosts
+  known_hosts:
+    host: "{{ item[0].item }}"
+    key: "{{ item[1] }}"
+  with_subelements:
+    - "{{ keyscan_result.results }}"
+    - stdout_lines
diff --git a/ansible/roles/disable-selinux/tasks/main.yml b/ansible/roles/disable-selinux/tasks/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..5d0055904388693ae735777e116fd677a7c1233b
--- /dev/null
+++ b/ansible/roles/disable-selinux/tasks/main.yml
@@ -0,0 +1,56 @@
+---
+- name: Ensure required packages are installed
+  yum:
+    name: "{{ item }}"
+    state: installed
+  become: True
+  with_items:
+    - libselinux-python
+
+- name: Ensure SELinux is disabled
+  selinux:
+    state: disabled
+  register: selinux_result
+  become: True
+
+- name: Set a fact to determine whether we are running locally
+  set_fact:
+    is_local: "{{ lookup('pipe', 'hostname') in [ansible_hostname, ansible_nodename] }}"
+  when: "{{ selinux_result | changed }}"
+
+# Any SSH connection errors cause ansible to fail the task. We therefore
+# perform a manual SSH connection and allow the command to fail.
+- name: Reboot the system to apply SELinux changes (remote)
+  local_action:
+    # Use -tt to force a pseudo tty.
+    module: >
+      command
+      ssh -tt {{ ansible_user }}@{{ ansible_host | default(inventory_hostname) }}
+      sudo shutdown -r now "Applying SELinux changes"
+  register: reboot_result
+  failed_when:
+    - "{{ reboot_result | failed }}"
+    - "{{ 'closed by remote host' not in reboot_result.stderr }}"
+  when:
+    - "{{ selinux_result | changed }}"
+    - "{{ not is_local | bool }}"
+
+- name: Reboot the system to apply SELinux changes (local)
+  command: shutdown -r now "Applying SELinux changes"
+  become: True
+  when:
+    - "{{ selinux_result | changed }}"
+    - "{{ is_local | bool }}"
+
+# If we're running this locally we won't get here.
+- name: Wait for the system to boot up (remote)
+  local_action:
+    module: wait_for
+    host: "{{ ansible_host | default(inventory_hostname) }}"
+    port: 22
+    state: started
+    # Wait for 10 seconds before polling to ensure the node has shutdown.
+    delay: 10
+  when:
+    - "{{ selinux_result | changed }}"
+    - "{{ not is_local | bool }}"
diff --git a/ansible/roles/docker/tasks/main.yml b/ansible/roles/docker/tasks/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..4e065b724c67c758341569007d3af56872e0a848
--- /dev/null
+++ b/ansible/roles/docker/tasks/main.yml
@@ -0,0 +1,7 @@
+---
+- name: Ensure user is in the docker group
+  user:
+    name: "{{ ansible_user_id }}"
+    groups: docker
+    append: yes
+  become: True
diff --git a/ansible/roles/ip-allocation/library/ip_allocation.py b/ansible/roles/ip-allocation/library/ip_allocation.py
new file mode 100644
index 0000000000000000000000000000000000000000..5ddb9032f23ac87120123494b7ca47bcca4826fe
--- /dev/null
+++ b/ansible/roles/ip-allocation/library/ip_allocation.py
@@ -0,0 +1,120 @@
+#!/usr/bin/python
+
+from ansible.module_utils.basic import *
+import sys
+
+# Store a list of import errors to report to the user.
+IMPORT_ERRORS = []
+try:
+    import netaddr
+except Exception as e:
+    IMPORT_ERRORS.append(e)
+try:
+    import yaml
+except Exception as e:
+    IMPORT_ERRORS.append(e)
+
+
+DOCUMENTATION = """ 
+WM
+"""
+
+EXAMPLES = """ 
+WM
+"""
+
+
+def read_allocations(module):
+    """Read IP address allocations from the allocation file."""
+    filename = module.params['allocation_file']
+    try:
+        with open(filename, 'r') as f:
+            return yaml.load(f)
+    except IOError as e:
+        module.fail_json(msg="Failed to open allocation file %s for reading" % filename)
+    except yaml.YAMLError as e:
+        module.fail_json(msg="Failed to parse allocation file %s as YAML" % filename)
+
+
+def write_allocations(module, allocations):
+    """Write IP address allocations to the allocation file."""
+    filename = module.params['allocation_file']
+    try:
+        with open(filename, 'w') as f:
+            yaml.dump(allocations, f, default_flow_style=False)
+    except IOError as e:
+        module.fail_json(msg="Failed to open allocation file %s for writing" % filename)
+    except yaml.YAMLError as e:
+        module.fail_json(msg="Failed to dump allocation file %s as YAML" % filename)
+
+
+def update_allocation(module, allocations):
+    """Allocate an IP address on a network for a host.
+
+    :param module: AnsibleModule instance
+    :param allocations: Existing IP address allocations
+    """
+    net_name = module.params['net_name']
+    hostname = module.params['hostname']
+    cidr = module.params['cidr']
+    network = netaddr.IPNetwork(cidr)
+    result = {
+        'changed': False,
+    }
+    object_name = "%s_ips" % net_name
+    net_allocations = allocations.setdefault(object_name, {})
+    invalid_allocations = {hn: ip for hn, ip in net_allocations.items()
+                           if ip not in network}
+    if invalid_allocations:
+        module.fail_json(msg="Found invalid existing allocations in network %s: %s" %
+            (network, ", ".join("%s: %s" % (hn, ip) for hn, ip in invalid_allocations.items())))
+    if hostname not in net_allocations:
+        result['changed'] = True
+        ips = netaddr.IPSet(net_allocations.values())
+        free_ips = netaddr.IPSet([network]) - ips
+        for free_cidr in free_ips.iter_cidrs():
+            ip = free_cidr[0]
+            break
+        else:
+            module.fail_json(msg="No unallocated IP addresses for %s in %s" % (hostname, net_name))
+        free_ips.remove(ip)
+        net_allocations[hostname] = str(ip)
+    result['ip'] = net_allocations[hostname]
+    return result
+
+
+def allocate(module):
+    """Allocate an IP address for a host, updating the allocation file."""
+    allocations = read_allocations(module)
+    result = update_allocation(module, allocations)
+    if result['changed'] and not module.check_mode:
+        write_allocations(module, allocations)
+    return result
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            net_name=dict(required=True, type='str'),
+            hostname=dict(required=True, type='str'),
+            cidr=dict(required=True, type='str'),
+            allocation_file=dict(required=True, type='str'),
+        ),
+        supports_check_mode=True,
+    )
+
+    # Fail if there were any exceptions when importing modules.
+    if IMPORT_ERRORS:
+        module.fail_json(msg="Import errors: %s" %
+                         ", ".join([repr(e) for e in IMPORT_ERRORS]))
+
+    try:
+        results = allocate(module)
+    except Exception as e:
+        module.fail_json(msg="Failed to allocate IP address: %s" % repr(e))
+    else:
+        module.exit_json(**results)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible/roles/ip-allocation/tasks/main.yml b/ansible/roles/ip-allocation/tasks/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..cccf3c2bc25b0ed7b71918a20f48f316167b15b8
--- /dev/null
+++ b/ansible/roles/ip-allocation/tasks/main.yml
@@ -0,0 +1,9 @@
+---
+- name: Ensure IP addresses are allocated
+  local_action:
+    module: ip_allocation
+    allocation_file: "{{ ip_allocation_filename }}"
+    hostname: "{{ ip_allocation_hostname }}"
+    net_name: "{{ item.net_name }}"
+    cidr: "{{ item.cidr }}"
+  with_items: "{{ ip_allocations }}"
diff --git a/ansible/roles/kolla-ansible/defaults/main.yml b/ansible/roles/kolla-ansible/defaults/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..916f2cb87b0a2a9d0a3f4554fa431e790fca43d5
--- /dev/null
+++ b/ansible/roles/kolla-ansible/defaults/main.yml
@@ -0,0 +1,139 @@
+---
+# Virtualenv directory where Kolla will be installed.
+kolla_venv: "{{ ansible_env['PWD'] }}/kolla-venv"
+
+# Directory where Kolla config files will be installed.
+kolla_config_path:
+
+# Directory where Kolla custom configuration files will be installed.
+kolla_node_custom_config_path:
+
+# Valid options are [ centos, fedora, oraclelinux, ubuntu ]
+kolla_base_distro:
+
+# Valid options are [ binary, source ]
+kolla_install_type:
+
+# Valid option is Docker repository tag
+kolla_openstack_release:
+
+# This should be a VIP, an unused IP on your network that will float between
+# the hosts running keepalived for high-availability. When running an All-In-One
+# without haproxy and keepalived, this should be the first IP on your
+# 'network_interface' as set in the Networking section below.
+kolla_internal_vip_address:
+
+# This is the DNS name that maps to the kolla_internal_vip_address VIP. By
+# default it is the same as kolla_internal_vip_address.
+kolla_internal_fqdn:
+
+# This should be a VIP, an unused IP on your network that will float between
+# the hosts running keepalived for high-availability. It defaults to the
+# kolla_internal_vip_address, allowing internal and external communication to
+# share the same address.  Specify a kolla_external_vip_address to separate
+# internal and external requests between two VIPs.
+kolla_external_vip_address:
+
+# The Public address used to communicate with OpenStack as set in the public_url
+# for the endpoints that will be created. This DNS name should map to
+# kolla_external_vip_address.
+kolla_external_fqdn:
+
+
+####################
+# Networking options
+####################
+# This interface is what all your api services will be bound to by default.
+# Additionally, all vxlan/tunnel and storage network traffic will go over this
+# interface by default. This interface must contain an IPv4 address.
+# It is possible for hosts to have non-matching names of interfaces - these can
+# be set in an inventory file per host or per group or stored separately, see
+#     http://docs.ansible.com/ansible/intro_inventory.html
+# Yet another way to work around the naming problem is to create a bond for the
+# interface on all hosts and give the bond name here. Similar strategy can be
+# followed for other types of interfaces.
+kolla_network_interface:
+
+# These can be adjusted for even more customization. The default is the same as
+# the 'network_interface'. These interfaces must contain an IPv4 address.
+kolla_external_vip_interface:
+kolla_api_interface:
+kolla_storage_interface:
+kolla_cluster_interface:
+
+# This is the raw interface given to neutron as its external network port. Even
+# though an IP address can exist on this interface, it will be unusable in most
+# configurations. It is recommended this interface not be configured with any IP
+# addresses for that reason.
+kolla_neutron_external_interface:
+
+# Name of the Neutron external bridge.
+kolla_neutron_bridge_name:
+
+# This is the interface to use for Bifrost bare metal provisioning of the
+# control plane.
+kolla_bifrost_network_interface:
+
+# This is the interface to use for bare metal provisioning. It is not a
+# standard kolla variable.
+kolla_provision_interface:
+
+
+####################
+# TLS options
+####################
+# To provide encryption and authentication on the kolla_external_vip_interface,
+# TLS can be enabled.  When TLS is enabled, certificates must be provided to
+# allow clients to perform authentication.
+kolla_enable_tls_external:
+kolla_external_fqdn_cert:
+
+
+####################
+# OpenStack options
+####################
+# Use these options to set the various log levels across all OpenStack projects
+# Valid options are [ True, False ]
+kolla_openstack_logging_debug:
+
+# OpenStack services can be enabled or disabled with these options
+#kolla_enable_aodh:
+#kolla_enable_barbican:
+#kolla_enable_ceilometer:
+#kolla_enable_central_logging:
+#kolla_enable_ceph:
+#kolla_enable_ceph_rgw:
+#kolla_enable_cinder:
+#kolla_enable_cinder_backend_iscsi:
+#kolla_enable_cinder_backend_lvm:
+#kolla_enable_cloudkitty:
+#kolla_enable_congress:
+#kolla_enable_etcd:
+#kolla_enable_gnocchi:
+#kolla_enable_grafana:
+#kolla_enable_heat:
+#kolla_enable_horizon:
+#kolla_enable_influxdb:
+#kolla_enable_ironic:
+#kolla_enable_kuryr:
+#kolla_enable_magnum:
+#kolla_enable_manila:
+#kolla_enable_mistral:
+#kolla_enable_mongodb:
+#kolla_enable_murano:
+#kolla_enable_multipathd:
+#kolla_enable_neutron_dvr:
+#kolla_enable_neutron_lbaas:
+#kolla_enable_neutron_qos:
+#kolla_enable_neutron_agent_ha:
+#kolla_enable_neutron_vpnaas:
+#kolla_enable_rally:
+#kolla_enable_sahara:
+#kolla_enable_senlin:
+#kolla_enable_swift:
+#kolla_enable_telegraf:
+#kolla_enable_tempest:
+#kolla_enable_watcher:
+
+# Free form extra configuration to append to {{ kolla_config_path }}/globals.yml.
+kolla_extra_globals:
diff --git a/ansible/roles/kolla-ansible/files/baremetal-docker_yum_repo.j2.patch b/ansible/roles/kolla-ansible/files/baremetal-docker_yum_repo.j2.patch
new file mode 100644
index 0000000000000000000000000000000000000000..f7b7bfa1d7143f12b8c48de3c7f2c5715805f163
--- /dev/null
+++ b/ansible/roles/kolla-ansible/files/baremetal-docker_yum_repo.j2.patch
@@ -0,0 +1,10 @@
+--- /usr/share/kolla/ansible/roles/baremetal/templates/docker_yum_repo.j2.old	2017-01-10 16:21:05.305626808 -0500
++++ /usr/share/kolla/ansible/roles/baremetal/templates/docker_yum_repo.j2	2017-01-10 16:21:09.216645923 -0500
+@@ -1,6 +1,6 @@
+ [docker-repo]
+ name=Docker main Repository
+-baseurl={{docker_yum_url}}/repo/main/{{ansible_distribution|lower}}/{{ansible_distribution_major_version|lower}}
++baseurl={{docker_yum_url}}/repo/main/{% if ansible_distribution == 'RedHat' %}centos{% else %}{{ansible_distribution|lower}}{% endif %}/{{ansible_distribution_major_version|lower}}
+ enabled=1
+ gpgcheck=1
+ gpgkey={{docker_yum_url}}/gpg
diff --git a/ansible/roles/kolla-ansible/files/baremetal-pre-install.yml.patch b/ansible/roles/kolla-ansible/files/baremetal-pre-install.yml.patch
new file mode 100644
index 0000000000000000000000000000000000000000..9a3e2c0a95a63663ee8d3f5f2c1061eaf1e94fb6
--- /dev/null
+++ b/ansible/roles/kolla-ansible/files/baremetal-pre-install.yml.patch
@@ -0,0 +1,27 @@
+--- /usr/share/kolla/ansible/roles/baremetal/tasks/pre-install.yml.old  2017-01-06 17:23:12.444746830 +0000
++++ /usr/share/kolla/ansible/roles/baremetal/tasks/pre-install.yml      2017-01-06 17:22:27.864278879 +0000
+@@ -28,6 +28,7 @@
+           {% for host in groups['all'] %}
+           {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }} {{ hostvars[host]['ansible_hostname'] }}
+           {% endfor %}
++  become: True
+   when: customize_etc_hosts | bool
+ 
+ - name: ensure sudo group is present
+@@ -126,7 +127,7 @@
+     recurse: yes
+     owner: kolla
+     group: kolla
+-    mode: 755
++    mode: 0755
+   become: True
+   when: create_kolla_user | bool == True
+ 
+@@ -135,6 +136,6 @@
+     path: /etc/kolla
+     state: directory
+     recurse: yes
+-    mode: 666
++    mode: 0666
+   become: True
+   when: create_kolla_user | bool == False
diff --git a/ansible/roles/kolla-ansible/files/ironic-api.json.j2.patch b/ansible/roles/kolla-ansible/files/ironic-api.json.j2.patch
new file mode 100644
index 0000000000000000000000000000000000000000..84546681b7fddcac40e7e073556f1fe6ebf0971d
--- /dev/null
+++ b/ansible/roles/kolla-ansible/files/ironic-api.json.j2.patch
@@ -0,0 +1,11 @@
+--- /usr/share/kolla/ansible/roles/ironic/templates/ironic-api.json.j2.old      2017-01-06 13:56:52.881061188 +0000
++++ /usr/share/kolla/ansible/roles/ironic/templates/ironic-api.json.j2  2017-01-06 14:00:21.757338271 +0000
+@@ -10,7 +10,7 @@
+     ],
+     "permissions": [
+         {
+-            "path": "/var/log/kolla/ironic"
++            "path": "/var/log/kolla/ironic",
+             "owner": "ironic:ironic",
+             "recurse": true
+         }
diff --git a/ansible/roles/kolla-ansible/files/ironic-conductor.json.j2.patch b/ansible/roles/kolla-ansible/files/ironic-conductor.json.j2.patch
new file mode 100644
index 0000000000000000000000000000000000000000..97946dae3f909572d168d27e2ffc30c15a773586
--- /dev/null
+++ b/ansible/roles/kolla-ansible/files/ironic-conductor.json.j2.patch
@@ -0,0 +1,11 @@
+--- /usr/share/kolla/ansible/roles/ironic/templates/ironic-conductor.json.j2.old        2017-01-06 14:28:35.048365453 +0000
++++ /usr/share/kolla/ansible/roles/ironic/templates/ironic-conductor.json.j2    2017-01-06 14:28:44.858467071 +0000
+@@ -20,7 +20,7 @@
+             "recurse": true
+         },
+         {
+-            "path": "/tftpboot"
++            "path": "/tftpboot",
+             "owner": "ironic:ironic",
+             "recurse": true
+         }
diff --git a/ansible/roles/kolla-ansible/files/kolla-ansible.patch b/ansible/roles/kolla-ansible/files/kolla-ansible.patch
new file mode 100644
index 0000000000000000000000000000000000000000..f5c6acaa3d92d349a3c7bf73e7f1c3bdfee0aaad
--- /dev/null
+++ b/ansible/roles/kolla-ansible/files/kolla-ansible.patch
@@ -0,0 +1,11 @@
+--- ansible/kolla-venv/bin/kolla-ansible.orig	2017-01-29 21:20:29.189225104 +0000
++++ ansible/kolla-venv/bin/kolla-ansible	2017-01-31 16:31:07.203695865 +0000
+@@ -9,6 +9,8 @@
+         BASEDIR=/usr/share/kolla
+     elif [[ ${dir_name} == "/usr/local/bin" ]]; then
+         BASEDIR=/usr/local/share/kolla
++    elif [[ ${dir_name} == "${VIRTUAL_ENV}/bin" ]]; then
++        BASEDIR="${VIRTUAL_ENV}/share/kolla"
+     else
+         BASEDIR="$(dirname ${dir_name})"
+     fi
diff --git a/ansible/roles/kolla-ansible/files/prechecks-port_checks.yml.patch b/ansible/roles/kolla-ansible/files/prechecks-port_checks.yml.patch
new file mode 100644
index 0000000000000000000000000000000000000000..565fbb4f780b7a927334fe158511b5be6f0d1c76
--- /dev/null
+++ b/ansible/roles/kolla-ansible/files/prechecks-port_checks.yml.patch
@@ -0,0 +1,11 @@
+--- ansible/kolla-venv/share/kolla/ansible/roles/prechecks/tasks/port_checks.yml.orig	2017-01-31 16:26:26.021463306 +0000
++++ ansible/kolla-venv/share/kolla/ansible/roles/prechecks/tasks/port_checks.yml	2017-01-31 16:27:22.045711530 +0000
+@@ -601,7 +601,7 @@
+   changed_when: false
+   failed_when: "'169.254.' not in kolla_internal_vip_address and \
+                 kolla_internal_vip_address | ipaddr(ip_addr_output.stdout.split()[3]) is none"
+-  when: enable_haproxy | bool
++  when: false
+ 
+ - name: Checking the network_interface is present
+   fail: "msg='Please check the network_interface property - interface {{ network_interface }} not found'"
diff --git a/ansible/roles/kolla-ansible/tasks/config.yml b/ansible/roles/kolla-ansible/tasks/config.yml
new file mode 100644
index 0000000000000000000000000000000000000000..a0cc1da25710e1c5b97ff31162856de0d8063350
--- /dev/null
+++ b/ansible/roles/kolla-ansible/tasks/config.yml
@@ -0,0 +1,34 @@
+---
+- name: Ensure the Kolla configuration directories exist
+  file:
+    path: "{{ item }}"
+    state: directory
+    mode: 0755
+  become: True
+  with_items:
+    - "{{ kolla_config_path }}/inventory"
+    - "{{ kolla_node_custom_config_path }}"
+
+- name: Ensure the Kolla configuration files exist
+  template:
+    src: "{{ item.src }}"
+    dest: "{{ kolla_config_path }}/{{ item.dest }}"
+    mode: 0644
+  become: True
+  with_items:
+    - { src: seed.j2, dest: inventory/seed }
+    - { src: overcloud.j2, dest: inventory/overcloud }
+    - { src: globals.yml.j2, dest: globals.yml }
+
+- name: Check whether the Kolla passwords file exists
+  stat:
+    path: "{{ kolla_config_path }}/passwords.yml"
+  register: kolla_passwords_stat
+
+- name: Generate Kolla passwords
+  shell: >
+    cp {{ kolla_install_dir }}/etc_examples/kolla/passwords.yml {{ kolla_config_path }}/passwords.yml.generated
+    && {{ kolla_venv }}/bin/kolla-genpwd -p {{ kolla_config_path }}/passwords.yml.generated
+    && mv {{ kolla_config_path }}/passwords.yml.generated {{ kolla_config_path }}/passwords.yml
+  become: True
+  when: not kolla_passwords_stat.stat.exists
diff --git a/ansible/roles/kolla-ansible/tasks/install.yml b/ansible/roles/kolla-ansible/tasks/install.yml
new file mode 100644
index 0000000000000000000000000000000000000000..726f14d381fb883d46d64b705724a07a428ff9f8
--- /dev/null
+++ b/ansible/roles/kolla-ansible/tasks/install.yml
@@ -0,0 +1,19 @@
+---
+- name: Ensure Kolla patches are applied
+  patch:
+    src: "{{ item.src }}"
+    dest: "{{ item.dest }}"
+  become: True
+  with_items:
+    - src: baremetal-docker_yum_repo.j2.patch
+      dest: "{{ kolla_install_dir }}/ansible/roles/baremetal/templates/docker_yum_repo.j2"
+    - src: baremetal-pre-install.yml.patch
+      dest: "{{ kolla_install_dir }}/ansible/roles/baremetal/tasks/pre-install.yml"
+    - src: ironic-api.json.j2.patch
+      dest: "{{ kolla_install_dir }}/ansible/roles/ironic/templates/ironic-api.json.j2"
+    - src: ironic-conductor.json.j2.patch
+      dest: "{{ kolla_install_dir }}/ansible/roles/ironic/templates/ironic-conductor.json.j2"
+    - src: prechecks-port_checks.yml.patch
+      dest: "{{ kolla_install_dir }}/ansible/roles/prechecks/tasks/port_checks.yml"
+    - src: kolla-ansible.patch
+      dest: "{{ kolla_venv }}/bin/kolla-ansible"
diff --git a/ansible/roles/kolla-ansible/tasks/main.yml b/ansible/roles/kolla-ansible/tasks/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..8570db68c389b54cc241c45737f459cb88b8ee7d
--- /dev/null
+++ b/ansible/roles/kolla-ansible/tasks/main.yml
@@ -0,0 +1,3 @@
+---
+- include: install.yml
+- include: config.yml
diff --git a/ansible/roles/kolla-ansible/templates/globals.yml.j2 b/ansible/roles/kolla-ansible/templates/globals.yml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..5070f35649bf94aeca628545a45a676fedc8f57f
--- /dev/null
+++ b/ansible/roles/kolla-ansible/templates/globals.yml.j2
@@ -0,0 +1,253 @@
+---
+# {{ ansible_managed }}
+
+# You can use this file to override _any_ variable throughout Kolla.
+# Additional options can be found in the 'kolla/ansible/group_vars/all.yml' file.
+# Default values for all of the commented parameters are shown here. To
+# override a default, uncomment the parameter and change its value.
+
+###################
+# Kolla options
+###################
+# Valid options are [ COPY_ONCE, COPY_ALWAYS ]
+config_strategy: "COPY_ALWAYS"
+
+# Valid options are [ centos, fedora, oraclelinux, ubuntu ]
+kolla_base_distro: "{{ kolla_base_distro }}"
+
+# Valid options are [ binary, source ]
+kolla_install_type: "{{ kolla_install_type }}"
+
+# Valid option is Docker repository tag
+openstack_release: "{{ kolla_openstack_release }}"
+
+# This should be a VIP, an unused IP on your network that will float between
+# the hosts running keepalived for high-availability. When running an All-In-One
+# without haproxy and keepalived, this should be the first IP on your
+# 'network_interface' as set in the Networking section below.
+kolla_internal_vip_address: "{{ kolla_internal_vip_address }}"
+
+# This is the DNS name that maps to the kolla_internal_vip_address VIP. By
+# default it is the same as kolla_internal_vip_address.
+kolla_internal_fqdn: "{{ kolla_internal_fqdn }}"
+
+# This should be a VIP, an unused IP on your network that will float between
+# the hosts running keepalived for high-availability. It defaults to the
+# kolla_internal_vip_address, allowing internal and external communication to
+# share the same address.  Specify a kolla_external_vip_address to separate
+# internal and external requests between two VIPs.
+kolla_external_vip_address: "{{ kolla_external_vip_address }}"
+
+# The Public address used to communicate with OpenStack as set in the public_url
+# for the endpoints that will be created. This DNS name should map to
+# kolla_external_vip_address.
+kolla_external_fqdn: "{{ kolla_external_fqdn }}"
+
+# Path to directory containing Kolla custom configuration files.
+node_custom_config: "{{ kolla_node_custom_config_path }}"
+
+####################
+# Docker options
+####################
+### Example: Private repository with authentication
+
+#docker_registry: "172.16.0.10:4000"
+#docker_namespace: "companyname"
+#docker_registry_username: "sam"
+#docker_registry_password: "correcthorsebatterystaple"
+
+
+####################
+# Networking options
+####################
+# This interface is what all your api services will be bound to by default.
+# Additionally, all vxlan/tunnel and storage network traffic will go over this
+# interface by default. This interface must contain an IPv4 address.
+# It is possible for hosts to have non-matching names of interfaces - these can
+# be set in an inventory file per host or per group or stored separately, see
+#     http://docs.ansible.com/ansible/intro_inventory.html
+# Yet another way to work around the naming problem is to create a bond for the
+# interface on all hosts and give the bond name here. A similar strategy can be
+# followed for other types of interfaces.
+network_interface: "{{ kolla_network_interface }}"
+
+# These can be adjusted for even more customization. The default is the same as
+# the 'network_interface'. These interfaces must contain an IPv4 address.
+kolla_external_vip_interface: "{{ kolla_external_vip_interface }}"
+api_interface: "{{ kolla_api_interface }}"
+storage_interface: "{{ kolla_storage_interface }}"
+cluster_interface: "{{ kolla_cluster_interface }}"
+# NOTE: tunnel_interface is unused.
+
+# This is the raw interface given to neutron as its external network port. Even
+# though an IP address can exist on this interface, it will be unusable in most
+# configurations. It is recommended this interface not be configured with any IP
+# addresses for that reason.
+neutron_external_interface: "{{ kolla_neutron_external_interface }}"
+
+# Name of the Neutron external bridge.
+neutron_bridge_name: "{{ kolla_neutron_bridge_name }}"
+
+# This is the interface to use for Bifrost bare metal provisioning of the
+# control plane.
+bifrost_network_interface: "{{ kolla_bifrost_network_interface }}"
+
+# This is the interface to use for bare metal provisioning. It is not a
+# standard kolla variable.
+provision_interface: "{{ kolla_provision_interface }}"
+
+# Valid options are [ openvswitch, linuxbridge ]
+neutron_plugin_agent: "openvswitch"
+
+
+####################
+# keepalived options
+####################
+# Arbitrary unique number from 0..255
+#keepalived_virtual_router_id: "51"
+
+
+####################
+# TLS options
+####################
+# To provide encryption and authentication on the kolla_external_vip_interface,
+# TLS can be enabled.  When TLS is enabled, certificates must be provided to
+# allow clients to perform authentication.
+kolla_enable_tls_external: "{{ kolla_enable_tls_external }}"
+kolla_external_fqdn_cert: "{{ kolla_external_fqdn_cert }}"
+
+
+####################
+# OpenStack options
+####################
+# Use these options to set the various log levels across all OpenStack projects
+# Valid options are [ True, False ]
+openstack_logging_debug: {{ kolla_openstack_logging_debug }}
+
+# Valid options are [ novnc, spice ]
+#nova_console: "novnc"
+
+# OpenStack services can be enabled or disabled with these options
+{% for feature_flag in kolla_feature_flags %}
+{% if ('kolla_enable_' ~ feature_flag) in hostvars[inventory_hostname] %}
+enable_{{ feature_flag }}: {{ hostvars[inventory_hostname]['kolla_enable_' ~ feature_flag] | bool }}
+{% endif %}
+{% endfor %}
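+# As an illustrative example, setting kolla_enable_ironic: True in the
+# deployment inventory would render enable_ironic: True above; any flag from
+# the kolla_feature_flags list can be toggled in the same way.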
+
+###################
+# Ceph options
+###################
+# Ceph can be set up with caching to improve performance. To use the cache you
+# must provide separate disks from those used for the OSDs
+#ceph_enable_cache: "no"
+# Valid options are [ forward, none, writeback ]
+#ceph_cache_mode: "writeback"
+
+# Using erasure-coded pools requires that you set up a cache tier
+# Valid options are [ erasure, replicated ]
+#ceph_pool_type: "replicated"
+
+
+#######################
+# Keystone options
+#######################
+
+# Valid options are [ uuid, fernet ]
+#keystone_token_provider: 'uuid'
+
+# Interval to rotate fernet keys by (in seconds). Must be an interval of
+# 60(1 min), 120(2 min), 180(3 min), 240(4 min), 300(5 min), 360(6 min),
+# 600(10 min), 720(12 min), 900(15 min), 1200(20 min), 1800(30 min),
+# 3600(1 hour), 7200(2 hour), 10800(3 hour), 14400(4 hour), 21600(6 hour),
+# 28800(8 hour), 43200(12 hour), 86400(1 day), 604800(1 week).
+#fernet_token_expiry: 86400
+
+
+#######################
+# Glance options
+#######################
+# Configure image backend.
+#glance_backend_file: "yes"
+#glance_backend_ceph: "no"
+
+#######################
+# Ceilometer options
+#######################
+# Valid options are [ mongodb, mysql, gnocchi ]
+#ceilometer_database_type: "mongodb"
+
+
+#######################
+# Gnocchi options
+#######################
+# Valid options are [ file, ceph ]
+#gnocchi_backend_storage: "{% raw %}{{ 'ceph' if enable_ceph|bool else 'file' }}{% endraw %}"
+
+
+#######################
+# Cinder options
+#######################
+# Enable / disable Cinder backends
+#cinder_backend_ceph: "{% raw %}{{ enable_ceph }}{% endraw %}"
+
+#cinder_volume_group: "cinder-volumes"
+
+
+#######################
+# Nova options
+#######################
+#nova_backend_ceph: "{% raw %}{{ enable_ceph }}{% endraw %}"
+
+
+#######################
+# Horizon options
+#######################
+#horizon_backend_database: "no"
+
+
+#######################################
+# Manila - Shared File Systems Options
+#######################################
+# Valid options are [ True, False ]
+#driver_handles_share_servers: "True"
+
+
+##################################
+# Swift - Object Storage Options
+##################################
+# Swift expects block devices to be available for storage. Two types of storage
+# are supported: 1 - a storage device with a special partition name and
+# filesystem label, 2 - an unpartitioned disk with a filesystem. The label of
+# this filesystem is used to detect the disk which Swift will be using.
+
+# Swift supports two matching modes, valid options are [ prefix, strict ]
+#swift_devices_match_mode: "strict"
+
+# This parameter defines the matching pattern: if "strict" mode was selected
+# for swift_devices_match_mode then swift_devices_name should specify the name
+# of the special Swift partition, for example: "KOLLA_SWIFT_DATA". If "prefix"
+# mode was selected then swift_devices_name should specify a pattern which
+# matches the labels of the filesystems prepared for Swift.
+#swift_devices_name: "KOLLA_SWIFT_DATA"
+
+
+################################################
+# Tempest - The OpenStack Integration Test Suite
+################################################
+# The following values must be set when Tempest is enabled
+tempest_image_id:
+tempest_flavor_ref_id:
+tempest_public_network_id:
+tempest_floating_network_name:
+
+# tempest_image_alt_id: "{% raw %}{{ tempest_image_id }}{% endraw %}"
+# tempest_flavor_ref_alt_id: "{% raw %}{{ tempest_flavor_ref_id }}{% endraw %}"
+
+
+{% if kolla_extra_globals %}
+#######################
+# Extra configuration
+#######################
+
+{{ kolla_extra_globals|to_nice_yaml }}
+{% endif %}
diff --git a/ansible/roles/kolla-ansible/templates/overcloud.j2 b/ansible/roles/kolla-ansible/templates/overcloud.j2
new file mode 100644
index 0000000000000000000000000000000000000000..6fd78fd1f538c92b754b300f4023b43edaa7cebf
--- /dev/null
+++ b/ansible/roles/kolla-ansible/templates/overcloud.j2
@@ -0,0 +1,409 @@
+# {{ ansible_managed }}
+
+# Overcloud inventory for Kolla. Adapted from multinode inventory in Kolla
+# repository.
+
+[controllers]
+# These hostnames must be resolvable from your deployment host
+{% for controller in groups['controllers'] %}
+{% set controller_hv=hostvars[controller] %}
+{{ controller }}{% if "ansible_host" in controller_hv %}    ansible_host={{ controller_hv["ansible_host"] }}{% endif %}
+{% endfor %}
+
+[controllers:vars]
+ansible_user=kolla
+ansible_become=true
+
+# These initial groups are the only groups required to be modified. The
+# additional groups are for more control of the environment.
+[control:children]
+controllers
+
+# The network nodes are where your l3-agent and loadbalancers will run
+# This can be the same as a host in the control group
+[network:children]
+controllers
+
+[compute:children]
+controllers
+
+[monitoring:children]
+controllers
+
+[storage:children]
+controllers
+
+[baremetal:children]
+control
+network
+compute
+storage
+monitoring
+
+# You can explicitly specify which hosts run each project by updating the
+# groups in the sections below. Common services are grouped together.
+[collectd:children]
+compute
+
+[grafana:children]
+monitoring
+
+[etcd:children]
+control
+
+[influxdb:children]
+monitoring
+
+[kibana:children]
+control
+
+[telegraf:children]
+monitoring
+
+[elasticsearch:children]
+control
+
+[haproxy:children]
+network
+
+[mariadb:children]
+control
+
+[rabbitmq:children]
+control
+
+[mongodb:children]
+control
+
+[keystone:children]
+control
+
+[glance:children]
+control
+
+[nova:children]
+control
+
+[neutron:children]
+network
+
+[cinder:children]
+control
+
+[cloudkitty:children]
+control
+
+[memcached:children]
+control
+
+[horizon:children]
+control
+
+[swift:children]
+control
+
+[barbican:children]
+control
+
+[heat:children]
+control
+
+[murano:children]
+control
+
+[ironic:children]
+control
+
+[ceph:children]
+control
+
+[magnum:children]
+control
+
+[sahara:children]
+control
+
+[mistral:children]
+control
+
+[manila:children]
+control
+
+[ceilometer:children]
+control
+
+[aodh:children]
+control
+
+[congress:children]
+control
+
+[gnocchi:children]
+control
+
+# Tempest
+[tempest:children]
+control
+
+[senlin:children]
+control
+
+[vmtp:children]
+control
+
+[watcher:children]
+control
+
+[rally:children]
+control
+
+# Additional control implemented here. These groups allow you to control which
+# services run on which hosts at a per-service level.
+#
+# Word of caution: Some services are required to run on the same host to
+# function appropriately. For example, neutron-metadata-agent must run on the
+# same host as the l3-agent and (depending on configuration) the dhcp-agent.
+
+# Glance
+[glance-api:children]
+glance
+
+[glance-registry:children]
+glance
+
+# Nova
+[nova-api:children]
+nova
+
+[nova-conductor:children]
+nova
+
+[nova-consoleauth:children]
+nova
+
+[nova-novncproxy:children]
+nova
+
+[nova-scheduler:children]
+nova
+
+[nova-spicehtml5proxy:children]
+nova
+
+[nova-compute-ironic:children]
+nova
+
+# Neutron
+[neutron-server:children]
+control
+
+[neutron-dhcp-agent:children]
+neutron
+
+[neutron-l3-agent:children]
+neutron
+
+[neutron-lbaas-agent:children]
+neutron
+
+[neutron-metadata-agent:children]
+neutron
+
+[neutron-vpnaas-agent:children]
+neutron
+
+# Ceph
+[ceph-mon:children]
+ceph
+
+[ceph-rgw:children]
+ceph
+
+[ceph-osd:children]
+storage
+
+# Cinder
+[cinder-api:children]
+cinder
+
+[cinder-backup:children]
+storage
+
+[cinder-scheduler:children]
+cinder
+
+[cinder-volume:children]
+storage
+
+# Cloudkitty
+[cloudkitty-api:children]
+cloudkitty
+
+[cloudkitty-processor:children]
+cloudkitty
+
+# iSCSI
+[iscsid:children]
+compute
+storage
+ironic-conductor
+
+[tgtd:children]
+storage
+
+# Manila
+[manila-api:children]
+manila
+
+[manila-scheduler:children]
+manila
+
+[manila-share:children]
+network
+
+# Swift
+[swift-proxy-server:children]
+swift
+
+[swift-account-server:children]
+storage
+
+[swift-container-server:children]
+storage
+
+[swift-object-server:children]
+storage
+
+# Barbican
+[barbican-api:children]
+barbican
+
+[barbican-keystone-listener:children]
+barbican
+
+[barbican-worker:children]
+barbican
+
+# Heat
+[heat-api:children]
+heat
+
+[heat-api-cfn:children]
+heat
+
+[heat-engine:children]
+heat
+
+# Murano
+[murano-api:children]
+murano
+
+[murano-engine:children]
+murano
+
+# Ironic
+[ironic-api:children]
+ironic
+
+[ironic-conductor:children]
+ironic
+
+[ironic-inspector:children]
+ironic
+
+[ironic-pxe:children]
+ironic
+
+# Magnum
+[magnum-api:children]
+magnum
+
+[magnum-conductor:children]
+magnum
+
+# Sahara
+[sahara-api:children]
+sahara
+
+[sahara-engine:children]
+sahara
+
+# Mistral
+[mistral-api:children]
+mistral
+
+[mistral-executor:children]
+mistral
+
+[mistral-engine:children]
+mistral
+
+# Ceilometer
+[ceilometer-api:children]
+ceilometer
+
+[ceilometer-central:children]
+ceilometer
+
+[ceilometer-notification:children]
+ceilometer
+
+[ceilometer-collector:children]
+ceilometer
+
+[ceilometer-compute:children]
+compute
+
+# Aodh
+[aodh-api:children]
+aodh
+
+[aodh-evaluator:children]
+aodh
+
+[aodh-listener:children]
+aodh
+
+[aodh-notifier:children]
+aodh
+
+# Congress
+[congress-api:children]
+congress
+
+[congress-datasource:children]
+congress
+
+[congress-policy-engine:children]
+congress
+
+# Gnocchi
+[gnocchi-api:children]
+gnocchi
+
+[gnocchi-statsd:children]
+gnocchi
+
+[gnocchi-metricd:children]
+gnocchi
+
+# Multipathd
+[multipathd:children]
+compute
+
+# Watcher
+[watcher-api:children]
+watcher
+
+[watcher-engine:children]
+watcher
+
+[watcher-applier:children]
+watcher
+
+# Senlin
+[senlin-api:children]
+senlin
+
+[senlin-engine:children]
+senlin
diff --git a/ansible/roles/kolla-ansible/templates/seed.j2 b/ansible/roles/kolla-ansible/templates/seed.j2
new file mode 100644
index 0000000000000000000000000000000000000000..f8798a068c4acfa28ea5c1e9083212ad3ca3177a
--- /dev/null
+++ b/ansible/roles/kolla-ansible/templates/seed.j2
@@ -0,0 +1,17 @@
+# {{ ansible_managed }}
+
+# Simple inventory for bootstrapping Kolla seed node.
+[seed]
+{% for seed in groups['seed'] %}
+{% set seed_hv=hostvars[seed] %}
+{{ seed }}{% if "ansible_host" in seed_hv %}    ansible_host={{ seed_hv["ansible_host"] }}{% endif %}
+{% endfor %}
+
+[seed:vars]
+ansible_user=kolla
+
+[baremetal:children]
+seed
+
+[bifrost:children]
+seed
diff --git a/ansible/roles/kolla-ansible/vars/main.yml b/ansible/roles/kolla-ansible/vars/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..adee68bbe215f102fe1ae465b54823f47325f1ca
--- /dev/null
+++ b/ansible/roles/kolla-ansible/vars/main.yml
@@ -0,0 +1,42 @@
+---
+kolla_install_dir: "{{ kolla_venv }}/share/kolla"
+
+# List of features supported by Kolla as enable_* flags.
+kolla_feature_flags:
+  - aodh
+  - barbican
+  - ceilometer
+  - central_logging
+  - ceph
+  - ceph_rgw
+  - cinder
+  - cinder_backend_iscsi
+  - cinder_backend_lvm
+  - cloudkitty
+  - congress
+  - etcd
+  - gnocchi
+  - grafana
+  - heat
+  - horizon
+  - influxdb
+  - ironic
+  - kuryr
+  - magnum
+  - manila
+  - mistral
+  - mongodb
+  - murano
+  - multipathd
+  - neutron_dvr
+  - neutron_lbaas
+  - neutron_qos
+  - neutron_agent_ha
+  - neutron_vpnaas
+  - rally
+  - sahara
+  - senlin
+  - swift
+  - telegraf
+  - tempest
+  - watcher
diff --git a/ansible/roles/kolla-bifrost/defaults/main.yml b/ansible/roles/kolla-bifrost/defaults/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..d0fed6d54c1b3f1e20a7d0eaf1120a6cd050c068
--- /dev/null
+++ b/ansible/roles/kolla-bifrost/defaults/main.yml
@@ -0,0 +1,46 @@
+---
+# Directory where Kolla custom configuration files will be installed.
+kolla_node_custom_config_path:
+
+# DIB image OS element.
+kolla_bifrost_dib_os_element:
+
+# List of DIB image elements.
+kolla_bifrost_dib_elements: []
+
+# DIB image init element.
+kolla_bifrost_dib_init_element:
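+# For example (illustrative values only - any valid diskimage-builder elements
+# may be used):
+#kolla_bifrost_dib_os_element: centos7
+#kolla_bifrost_dib_elements:
+#  - dhcp-all-interfaces
+#kolla_bifrost_dib_init_element: cloud-init-datasources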
+
+# DIB image environment variables.
+kolla_bifrost_dib_env_vars: {}
+
+# List of DIB image packages.
+kolla_bifrost_dib_packages: []
+
+# List of Ironic drivers to enable.
+kolla_bifrost_enabled_drivers: []
+
+# Whether to enable the Ironic PXE drivers.
+kolla_bifrost_enable_pxe_drivers: false
+
+# List of Ironic PXE drivers to enable if kolla_bifrost_enable_pxe_drivers.
+kolla_bifrost_pxe_drivers: []
+
+# IP address range for DHCP.
+kolla_bifrost_dhcp_pool_start:
+kolla_bifrost_dhcp_pool_end:
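+# For example (illustrative addresses only):
+#kolla_bifrost_dhcp_pool_start: 192.168.33.100
+#kolla_bifrost_dhcp_pool_end: 192.168.33.200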
+
+# Default route provided to nodes via DHCP.
+kolla_bifrost_dnsmasq_router:
+
+# List of DNS servers provided to nodes via DHCP.
+kolla_bifrost_dnsmasq_dns_servers: []
+
+# DNS domain provided to nodes via DHCP.
+kolla_bifrost_domain:
+
+# Server inventory to be configured in {{ kolla_node_custom_config_path }}/bifrost/servers.yml.
+kolla_bifrost_servers: {}
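+# An illustrative sketch of a single server entry, loosely following the
+# Bifrost inventory format - consult the Bifrost documentation for the
+# authoritative schema:
+#kolla_bifrost_servers:
+#  controller0:
+#    driver: agent_ipmitool
+#    driver_info:
+#      power:
+#        ipmi_address: 192.168.33.10
+#        ipmi_username: admin
+#        ipmi_password: secret
+#    nics:
+#      - mac: "52:54:00:00:00:01"
+#    properties:
+#      cpus: 8
+#      ram: 8192
+#      disk_size: 100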
+
+# Free form extra configuration to append to {{ kolla_node_custom_config_path }}/bifrost/bifrost.yml.
+kolla_bifrost_extra_globals:
diff --git a/ansible/roles/kolla-bifrost/tasks/main.yml b/ansible/roles/kolla-bifrost/tasks/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..eae1faf6db7bde662ecf2f54509dba2e653c2caa
--- /dev/null
+++ b/ansible/roles/kolla-bifrost/tasks/main.yml
@@ -0,0 +1,18 @@
+---
+- name: Ensure the Kolla Bifrost configuration directories exist
+  file:
+    path: "{{ kolla_node_custom_config_path }}/bifrost"
+    state: directory
+    mode: 0755
+  become: True
+
+- name: Ensure the Kolla Bifrost configuration files exist
+  template:
+    src: "{{ item.src }}"
+    dest: "{{ kolla_node_custom_config_path }}/bifrost/{{ item.dest }}"
+    mode: 0644
+  become: True
+  with_items:
+    - { src: bifrost.yml.j2, dest: bifrost.yml }
+    - { src: dib.yml.j2, dest: dib.yml }
+    - { src: servers.yml.j2, dest: servers.yml }
diff --git a/ansible/roles/kolla-bifrost/templates/bifrost.yml.j2 b/ansible/roles/kolla-bifrost/templates/bifrost.yml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..c2afec1247b9333cb44a1555770fa54fabb0fa7c
--- /dev/null
+++ b/ansible/roles/kolla-bifrost/templates/bifrost.yml.j2
@@ -0,0 +1,35 @@
+---
+# List of enabled Ironic drivers.
+enabled_drivers: "{{ kolla_bifrost_enabled_drivers | join(',') }}"
+
+# Whether to enable legacy PXE/iscsi drivers.
+enable_pxe_drivers: {{ kolla_bifrost_enable_pxe_drivers | bool }}
+
+# List of legacy PXE/iscsi drivers to enable.
+pxe_drivers: "{{ kolla_bifrost_pxe_drivers | join(',') }}"
+
+# IP address range for DHCP.
+dhcp_pool_start: "{{ kolla_bifrost_dhcp_pool_start }}"
+dhcp_pool_end: "{{ kolla_bifrost_dhcp_pool_end }}"
+
+{% if kolla_bifrost_dnsmasq_router %}
+# Default route provided to nodes via DHCP.
+dnsmasq_router: "{{ kolla_bifrost_dnsmasq_router }}"
+{% endif %}
+
+{% if kolla_bifrost_dnsmasq_dns_servers %}
+# DNS servers provided to nodes via DHCP.
+dnsmasq_dns_servers: "{{ kolla_bifrost_dnsmasq_dns_servers | join(',') }}"
+{% endif %}
+
+{% if kolla_bifrost_domain %}
+# DNS domain provided to nodes via DHCP.
+domain: "{{ kolla_bifrost_domain }}"
+{% endif %}
+
+{% if kolla_bifrost_extra_globals %}
+###############################################################################
+# Extra configuration
+
+{{ kolla_bifrost_extra_globals|to_nice_yaml }}
+{% endif %}
diff --git a/ansible/roles/kolla-bifrost/templates/dib.yml.j2 b/ansible/roles/kolla-bifrost/templates/dib.yml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..65147b3118b48099b2f771e38b4cf38a4e412bd7
--- /dev/null
+++ b/ansible/roles/kolla-bifrost/templates/dib.yml.j2
@@ -0,0 +1,12 @@
+---
+# Diskimage-builder element for base OS.
+dib_os_element: "{{ kolla_bifrost_dib_os_element }}"
+
+# List of DIB image elements.
+dib_elements: "{{ (kolla_bifrost_dib_elements + [kolla_bifrost_dib_init_element]) | join(' ') }}"
+
+# DIB image environment variables.
+dib_env_vars: "{{ kolla_bifrost_dib_env_vars }}"
+
+# List of DIB image packages.
+dib_packages: "{{ kolla_bifrost_dib_packages | join(',') }}"
diff --git a/ansible/roles/kolla-bifrost/templates/servers.yml.j2 b/ansible/roles/kolla-bifrost/templates/servers.yml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..00754785414ed66672aa187225ca4d0687d29d86
--- /dev/null
+++ b/ansible/roles/kolla-bifrost/templates/servers.yml.j2
@@ -0,0 +1,2 @@
+---
+{{ kolla_bifrost_servers | to_nice_yaml }}
diff --git a/ansible/roles/kolla-build/defaults/main.yml b/ansible/roles/kolla-build/defaults/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..ad57376ab664cb0b603a0c73391dfbdd3230c16a
--- /dev/null
+++ b/ansible/roles/kolla-build/defaults/main.yml
@@ -0,0 +1,12 @@
+---
+# Directory where Kolla config files will be installed.
+kolla_config_path:
+
+# Valid options are [ centos, fedora, oraclelinux, ubuntu ]
+kolla_base_distro:
+
+# Valid options are [ binary, source ]
+kolla_install_type:
+
+# Valid option is Docker repository tag
+kolla_openstack_release:
diff --git a/ansible/roles/kolla-build/tasks/main.yml b/ansible/roles/kolla-build/tasks/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..e4eac152f0e5da094749b809108bbcf4d6f52b77
--- /dev/null
+++ b/ansible/roles/kolla-build/tasks/main.yml
@@ -0,0 +1,10 @@
+---
+- name: Ensure the Kolla build configuration files exist
+  template:
+    src: "{{ item.src }}"
+    dest: "{{ kolla_config_path }}/{{ item.dest }}"
+    mode: 0644
+  become: True
+  with_items:
+    - { src: kolla-build.conf.j2, dest: kolla-build.conf }
+    - { src: template-override.j2.j2, dest: template-override.j2 }
diff --git a/ansible/roles/kolla-build/templates/kolla-build.conf.j2 b/ansible/roles/kolla-build/templates/kolla-build.conf.j2
new file mode 100644
index 0000000000000000000000000000000000000000..f54e241df2de1651dbdb6d1c8ad3928478f61136
--- /dev/null
+++ b/ansible/roles/kolla-build/templates/kolla-build.conf.j2
@@ -0,0 +1,15 @@
+# {{ ansible_managed }}
+
+[DEFAULT]
+
+# Base container image distribution.
+base={{ kolla_base_distro }}
+
+# Method of OpenStack install. Valid options are [ binary, source ]
+type={{ kolla_install_type }}
+
+# Docker image tag to apply.
+tag={{ kolla_openstack_release }}
+
+# Path to a file containing template overrides.
+template_override={{ kolla_config_path }}/template-override.j2
diff --git a/ansible/roles/kolla-build/templates/template-override.j2.j2 b/ansible/roles/kolla-build/templates/template-override.j2.j2
new file mode 100644
index 0000000000000000000000000000000000000000..c108f318ac8ee11c2d2faa20ab069dd0b05a3454
--- /dev/null
+++ b/ansible/roles/kolla-build/templates/template-override.j2.j2
@@ -0,0 +1,11 @@
+# {{ ansible_managed }}
+
+{% raw %}
+{% extends parent_template %}
+
+# Disable troublesome keys
+{% set base_yum_repo_keys_override=['http://yum.mariadb.org/RPM-GPG-KEY-MariaDB'] %}
+
+# Disable repos with troublesome keys
+{% set base_yum_repo_files_override=['MariaDB.repo'] %}
+{% endraw %}
diff --git a/ansible/roles/kolla-host/defaults/main.yml b/ansible/roles/kolla-host/defaults/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..cdeda3486fba5103b6160351a1d83bde66f95d28
--- /dev/null
+++ b/ansible/roles/kolla-host/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+# SSH public key to be authorized by the Kolla host.
+kolla_authorized_key:
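+# For example, to authorize the deployment host's SSH public key (illustrative):
+#kolla_authorized_key: "{{ lookup('file', '~/.ssh/id_rsa.pub') }}"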
diff --git a/ansible/roles/kolla-host/tasks/main.yml b/ansible/roles/kolla-host/tasks/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..46834d808a83b2bdb00ba070dbfad4e09e3252b3
--- /dev/null
+++ b/ansible/roles/kolla-host/tasks/main.yml
@@ -0,0 +1,14 @@
+---
+- name: Ensure required packages are installed
+  yum:
+    name: "{{ item }}"
+    state: installed
+  become: True
+  with_items:
+    - vim
+
+- name: Ensure Kolla user has authorized our SSH key
+  authorized_key:
+    user: kolla
+    key: "{{ kolla_authorized_key }}"
+  become: True
diff --git a/ansible/roles/kolla-openstack/defaults/main.yml b/ansible/roles/kolla-openstack/defaults/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..b0820ae4fb81a095bc312dd628116d1456cde081
--- /dev/null
+++ b/ansible/roles/kolla-openstack/defaults/main.yml
@@ -0,0 +1,17 @@
+---
+# Directory where Kolla custom configuration files will be installed.
+kolla_node_custom_config_path:
+
+# List of enabled Ironic drivers.
+ironic_drivers:
+  - agent_ssh
+  - agent_ipmitool
+  - pxe_ssh
+  - pxe_ipmitool
+
+# Free form extra configuration to append to glance-api.conf and
+# glance-registry.conf.
+kolla_extra_glance:
+
+# Free form extra configuration to append to ironic.conf.
+kolla_extra_ironic:
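+# The value is appended verbatim to ironic.conf, for example (illustrative):
+#kolla_extra_ironic: |
+#  [DEFAULT]
+#  debug = True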
diff --git a/ansible/roles/kolla-openstack/tasks/main.yml b/ansible/roles/kolla-openstack/tasks/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..65f5c00f1d193822d1bf2372abf245aeface31c6
--- /dev/null
+++ b/ansible/roles/kolla-openstack/tasks/main.yml
@@ -0,0 +1,24 @@
+---
+- name: Ensure the Kolla OpenStack configuration directories exist
+  file:
+    path: "{{ kolla_node_custom_config_path }}/{{ item.name }}"
+    state: directory
+    owner: kolla
+    group: kolla
+    mode: 0755
+  become: True
+  with_items:
+    - { name: swift, enabled: "{{ kolla_enable_swift }}" }
+  when: "{{ item.enabled | bool }}"
+
+- name: Ensure the Kolla OpenStack configuration files exist
+  template:
+    src: "{{ item.src }}"
+    dest: "{{ kolla_node_custom_config_path }}/{{ item.dest }}"
+    owner: kolla
+    group: kolla
+    mode: 0644
+  become: True
+  with_items:
+    - { src: glance.conf.j2, dest: glance.conf, enabled: "{{ kolla_enable_glance }}" }
+    - { src: ironic.conf.j2, dest: ironic.conf, enabled: "{{ kolla_enable_ironic }}" }
+  when: "{{ item.enabled | bool }}"
diff --git a/ansible/roles/kolla-openstack/templates/glance.conf.j2 b/ansible/roles/kolla-openstack/templates/glance.conf.j2
new file mode 100644
index 0000000000000000000000000000000000000000..cf4c9de77757f789ff757e591c9fbbed20b79938
--- /dev/null
+++ b/ansible/roles/kolla-openstack/templates/glance.conf.j2
@@ -0,0 +1,38 @@
+# {{ ansible_managed }}
+
+{% if kolla_enable_swift | bool %}
+[glance_store]
+
+default_store=swift
+stores=swift
+{% raw %}
+swift_store_user=service:{{ glance_keystone_user }}
+swift_store_key={{ glance_keystone_password }}
+swift_store_auth_version=2
+swift_store_auth_address={{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}/v2.0
+{% endraw %}
+swift_store_endpoint_type=internalURL
+swift_store_create_container_on_put=True
+
+#swift_store_config_file=/etc/glance/glance-api.conf
+#default_swift_reference=swift_store_ref
+
+#[swift_store_ref]
+#{% raw %}
+#user=service:{{ glance_keystone_user }}
+#key={{ glance_keystone_password }}
+#auth_version=2
+#auth_address={{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}/v2.0
+#user_domain_id=default
+#project_domain_id=default
+#{% endraw %}
+
+{% endif %}
+
+{% if kolla_extra_glance %}
+#######################
+# Extra configuration
+#######################
+
+{{ kolla_extra_glance }}
+{% endif %}
diff --git a/ansible/roles/kolla-openstack/templates/ironic.conf.j2 b/ansible/roles/kolla-openstack/templates/ironic.conf.j2
new file mode 100644
index 0000000000000000000000000000000000000000..8c59dd79fa15ef39c4dfc23c5e0a3772141f11a8
--- /dev/null
+++ b/ansible/roles/kolla-openstack/templates/ironic.conf.j2
@@ -0,0 +1,22 @@
+# {{ ansible_managed }}
+
+[DEFAULT]
+enabled_drivers = {{ ironic_drivers | join(',') }}
+
+[conductor]
+{% raw %}
+api_url = {{ internal_protocol }}://{{ hostvars[inventory_hostname]['ansible_' + provision_interface | replace('-', '_')]['ipv4']['address'] }}:{{ ironic_api_port }}
+{% endraw %}
+
+[pxe]
+{% raw %}
+tftp_server = {{ hostvars[inventory_hostname]['ansible_' + provision_interface | replace('-', '_')]['ipv4']['address'] }}
+{% endraw %}
+
+{% if kolla_extra_ironic %}
+#######################
+# Extra configuration
+#######################
+
+{{ kolla_extra_ironic }}
+{% endif %}
diff --git a/ansible/roles/kolla/defaults/main.yml b/ansible/roles/kolla/defaults/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..061756399c81f1ee5869081ecff592de1f71a97e
--- /dev/null
+++ b/ansible/roles/kolla/defaults/main.yml
@@ -0,0 +1,6 @@
+---
+# Virtualenv directory where Kolla will be installed.
+kolla_venv: "{{ ansible_env['PWD'] }}/kolla-venv"
+
+# Directory where Kolla config files will be installed.
+kolla_config_path:
diff --git a/ansible/roles/kolla/tasks/main.yml b/ansible/roles/kolla/tasks/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..d4af85850b76d9cf5d5e361d7b8eda605c79438e
--- /dev/null
+++ b/ansible/roles/kolla/tasks/main.yml
@@ -0,0 +1,42 @@
+---
+- name: Ensure required packages are installed
+  yum:
+    name: "{{ item }}"
+    state: installed
+  become: True
+  with_items:
+    - gcc
+    - libffi-devel
+    - openssl-devel
+    - patch
+    - python-devel
+    - python-pip
+    - python-virtualenv
+
+- name: Ensure the latest version of pip is installed
+  pip:
+    name: "{{ item.name }}"
+    state: latest
+    virtualenv: "{{ kolla_venv }}"
+  with_items:
+    - { name: pip }
+
+- name: Ensure required Python packages are installed
+  pip:
+    name: "{{ item.name }}"
+    version: "{{ item.version }}"
+    state: present
+    virtualenv: "{{ kolla_venv }}"
+  with_items:
+    - { name: kolla, version: "{{ kolla_openstack_release }}" }
+    # Required for kolla-genpwd.
+    - { name: PyYAML, version: "3.12" }
+
+- name: Ensure the Kolla configuration directories exist
+  file:
+    path: "{{ item }}"
+    state: directory
+    mode: 0755
+  become: True
+  with_items:
+    - "{{ kolla_config_path }}"
diff --git a/ansible/roles/libvirt-vm/defaults/main.yml b/ansible/roles/libvirt-vm/defaults/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..e7a1e196afc2803b6bd5c2e2ddcf130a563f0016
--- /dev/null
+++ b/ansible/roles/libvirt-vm/defaults/main.yml
@@ -0,0 +1,21 @@
+---
+# Name of the VM.
+libvirt_vm_name:
+
+# Memory in MB.
+libvirt_vm_memory_mb:
+
+# Number of vCPUs.
+libvirt_vm_vcpus:
+
+# List of volumes.
+libvirt_vm_volumes: []
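+# Each volume is a dict; the keys consumed by this role are name, pool,
+# capacity, format (optional, default qcow2), image (optional) and device
+# (optional, default disk), for example (illustrative values):
+#libvirt_vm_volumes:
+#  - name: seed-root
+#    pool: default
+#    capacity: 50G
+#    format: qcow2
+#    image: http://example.com/centos7.qcow2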
+
+# List of network interfaces.
+libvirt_vm_interfaces: []
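+# Each interface is a dict with a network key naming a libvirt network, for
+# example (illustrative):
+#libvirt_vm_interfaces:
+#  - network: default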
+
+# Path to cache downloaded images.
+libvirt_vm_image_cache_path:
+
+# List of authorized SSH public keys.
+#libvirt_vm_public_keys: []
diff --git a/ansible/roles/libvirt-vm/files/virt_volume.sh b/ansible/roles/libvirt-vm/files/virt_volume.sh
new file mode 100644
index 0000000000000000000000000000000000000000..75048481d4210cac927a970488a2bf46df8e198b
--- /dev/null
+++ b/ansible/roles/libvirt-vm/files/virt_volume.sh
@@ -0,0 +1,60 @@
+#!/bin/bash
+
+# Ensure that a libvirt volume exists, optionally uploading an image.
+# On success, output a JSON object with a 'changed' item.
+
+if [[ $# -ne 4 ]] && [[ $# -ne 5 ]]; then
+    echo "Usage: $0 <name> <pool> <capacity> <format> [<image>]"
+    exit 1
+fi
+
+NAME=$1
+POOL=$2
+CAPACITY=$3
+FORMAT=$4
+IMAGE=$5
+
+# Check whether a volume with this name exists.
+output=$(virsh vol-info --pool $POOL --vol $NAME 2>&1)
+result=$?
+if [[ $result -eq 0 ]]; then
+    echo '{"changed": false}'
+    exit 0
+elif ! echo "$output" | grep 'Storage volume not found' >/dev/null 2>&1; then
+    echo "Unexpected error while getting volume info"
+    echo "$output"
+    exit $result
+fi
+
+# Create the volume.
+output=$(virsh vol-create-as --pool $POOL --name $NAME --capacity $CAPACITY --format $FORMAT 2>&1)
+result=$?
+if [[ $result -ne 0 ]]; then
+    echo "Failed to create volume"
+    echo "$output"
+    exit $result
+fi
+
+if [[ -n $IMAGE ]]; then
+    # Upload an image to the volume.
+    output=$(virsh vol-upload --pool $POOL --vol $NAME --file $IMAGE 2>&1)
+    result=$?
+    if [[ $result -ne 0 ]]; then
+        echo "Failed to upload image $IMAGE to volume $NAME"
+        echo "$output"
+        virsh vol-delete --pool $POOL --vol $NAME
+        exit $result
+    fi
+
+    # Resize the volume to the requested capacity.
+    output=$(virsh vol-resize --pool $POOL --vol $NAME --capacity $CAPACITY 2>&1)
+    result=$?
+    if [[ $result -ne 0 ]]; then
+        echo "Failed to resize volume $VOLUME to $CAPACITY"
+        echo "$output"
+        exit $result
+    fi
+fi
+
+echo '{"changed": true}'
+exit 0
diff --git a/ansible/roles/libvirt-vm/tasks/main.yml b/ansible/roles/libvirt-vm/tasks/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..c25c34040f7f66a91d096e4850af087a22f2c4be
--- /dev/null
+++ b/ansible/roles/libvirt-vm/tasks/main.yml
@@ -0,0 +1,3 @@
+---
+- include: volumes.yml
+- include: vm.yml
diff --git a/ansible/roles/libvirt-vm/tasks/vm.yml b/ansible/roles/libvirt-vm/tasks/vm.yml
new file mode 100644
index 0000000000000000000000000000000000000000..40c28c2d0ad13c3bc833a719ed123b020bdb0eed
--- /dev/null
+++ b/ansible/roles/libvirt-vm/tasks/vm.yml
@@ -0,0 +1,16 @@
+---
+- name: Ensure the VM is defined
+  virt:
+    name: "{{ libvirt_vm_name }}"
+    command: define
+    xml: "{{ lookup('template', 'vm.xml.j2') }}"
+
+- name: Ensure the VM is running
+  virt:
+    name: "{{ libvirt_vm_name }}"
+    state: running
+
+- name: Ensure the VM is started at boot
+  virt:
+    name: "{{ libvirt_vm_name }}"
+    command: autostart
diff --git a/ansible/roles/libvirt-vm/tasks/volumes.yml b/ansible/roles/libvirt-vm/tasks/volumes.yml
new file mode 100644
index 0000000000000000000000000000000000000000..4924b12056187640e369aa35d004ffeba322cdf2
--- /dev/null
+++ b/ansible/roles/libvirt-vm/tasks/volumes.yml
@@ -0,0 +1,30 @@
+---
+- name: Ensure remote images are downloaded
+  get_url:
+    url: "{{ item }}"
+    dest: "{{ libvirt_vm_image_cache_path }}/{{ item | basename }}"
+  with_items: "{{ libvirt_vm_volumes | selectattr('image', 'defined') | map(attribute='image') | list }}"
+  when: "{{ 'http' in item }}"
+
+- name: Ensure local images are copied
+  copy:
+    src: "{{ item }}"
+    dest: "{{ libvirt_vm_image_cache_path }}/{{ item | basename }}"
+  with_items: "{{ libvirt_vm_volumes | selectattr('image', 'defined') | map(attribute='image') | list }}"
+  when: "{{ 'http' not in item }}"
+
+- name: Ensure the VM volumes exist
+  script: >
+    virt_volume.sh
+    {{ item.name }}
+    {{ item.pool }}
+    {{ item.capacity }}
+    {{ item.format | default('qcow2') }}
+    {% if item.image is defined %}
+    {{ libvirt_vm_image_cache_path }}/{{ item.image | basename }}
+    {% endif %}
+  with_items: "{{ libvirt_vm_volumes }}"
+  register: volume_result
+  changed_when:
+    - "{{ volume_result | success }}"
+    - "{{ (volume_result.stdout | from_json).changed | default(True) }}"
diff --git a/ansible/roles/libvirt-vm/templates/vm.xml.j2 b/ansible/roles/libvirt-vm/templates/vm.xml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..aac3318ebdb18f4c95907b5a9888d6f0f90bc637
--- /dev/null
+++ b/ansible/roles/libvirt-vm/templates/vm.xml.j2
@@ -0,0 +1,30 @@
+<domain type='kvm'>
+  <name>{{ libvirt_vm_name }}</name>
+  <memory>{{ libvirt_vm_memory_mb | int * 1024 }}</memory>
+  <vcpu>{{ libvirt_vm_vcpus }}</vcpu>
+  <clock sync="localtime"/>
+  <os>
+    <type arch='x86_64'>hvm</type>
+  </os>
+  <devices>
+{% for volume in libvirt_vm_volumes %}
+    <disk type='volume' device='{{ volume.device | default('disk') }}'>
+      <driver name='qemu' type='{{ volume.format }}'/>
+      <source pool='{{ volume.pool }}' volume='{{ volume.name }}'/>
+      <target dev='vd{{ 'abcdefghijklmnopqrstuvwxyz'[loop.index0] }}'/>
+    </disk>
+{% endfor %}
+{% for interface in libvirt_vm_interfaces %}
+    <interface type='network'>
+      <source network='{{ interface.network }}'/>
+      <model type='virtio'/>
+    </interface>
+{% endfor %}
+    <serial type='pty'>
+       <target port='0'/>
+     </serial>
+     <console type='pty'>
+       <target type='serial' port='0'/>
+     </console>
+  </devices>
+</domain>
diff --git a/ansible/roles/ssh-known-host/tasks/main.yml b/ansible/roles/ssh-known-host/tasks/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..babfcd467f0e3fbc907e80e7ff4403604ef3d96b
--- /dev/null
+++ b/ansible/roles/ssh-known-host/tasks/main.yml
@@ -0,0 +1,17 @@
+---
+- name: Scan for SSH keys
+  local_action:
+    module: command ssh-keyscan {{ item }}
+  with_items:
+    - "{{ ansible_host|default(inventory_hostname) }}"
+  register: keyscan_result
+  changed_when: False
+
+- name: Ensure SSH keys are in known hosts
+  local_action:
+    module: known_hosts
+    host: "{{ item[0].item }}"
+    key: "{{ item[1] }}"
+  with_subelements:
+    - "{{ keyscan_result.results }}"
+    - stdout_lines
diff --git a/ansible/roles/swift-setup/defaults/main.yml b/ansible/roles/swift-setup/defaults/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..cfc0e1afe5ea762abb9206e47fb53a6b64a0c754
--- /dev/null
+++ b/ansible/roles/swift-setup/defaults/main.yml
@@ -0,0 +1,34 @@
+---
+# List of names of block devices to use for Swift.
+swift_block_devices: []
+
+# Docker image to use to build rings.
+swift_image:
+
+# Host on which to build rings.
+swift_ring_build_host:
+
+# Path in which to build ring files.
+swift_ring_build_path: /tmp/swift-rings
+
+# Ports on which Swift services listen.
+swift_service_ports:
+  object: 6000
+  account: 6001
+  container: 6002
+
+# Base-2 logarithm of the number of partitions.
+# i.e. num_partitions=2^<swift_part_power>.
+swift_part_power:
+
+# Object replication count.
+swift_replication_count:
+
+# Minimum time in hours between moving a given partition.
+swift_min_part_hours:
+
+# ID of the region for this Swift service.
+swift_region:
+
+# ID of the zone for this Swift service.
+swift_zone:
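+
+# As an illustrative example, a small test deployment might use
+# swift_part_power: 10, swift_replication_count: 3 and swift_min_part_hours: 1.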
diff --git a/ansible/roles/swift-setup/tasks/devices.yml b/ansible/roles/swift-setup/tasks/devices.yml
new file mode 100644
index 0000000000000000000000000000000000000000..0daeee8a0709510bff7a1dddd1180e6ffed1e209
--- /dev/null
+++ b/ansible/roles/swift-setup/tasks/devices.yml
@@ -0,0 +1,10 @@
+---
+- name: Ensure Swift partitions exist
+  command: parted /dev/{{ item }} -s -- mklabel gpt mkpart KOLLA_SWIFT_DATA 1 -1
+  with_items: "{{ swift_block_devices }}"
+  become: True
+
+- name: Ensure Swift XFS file systems exist
+  command: mkfs.xfs -f -L d{{ swift_block_devices.index(item) }} /dev/{{ item }}{% if item.startswith('loop') %}p{% endif %}1
+  with_items: "{{ swift_block_devices }}"
+  become: True
diff --git a/ansible/roles/swift-setup/tasks/main.yml b/ansible/roles/swift-setup/tasks/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..f342e69bb4dfe4c5652cbdbb474fecf97854ea0f
--- /dev/null
+++ b/ansible/roles/swift-setup/tasks/main.yml
@@ -0,0 +1,3 @@
+---
+- include: devices.yml
+- include: rings.yml
diff --git a/ansible/roles/swift-setup/tasks/rings.yml b/ansible/roles/swift-setup/tasks/rings.yml
new file mode 100644
index 0000000000000000000000000000000000000000..af092aa3614b772164e5e9945bfd24b4f9aaab32
--- /dev/null
+++ b/ansible/roles/swift-setup/tasks/rings.yml
@@ -0,0 +1,75 @@
+---
+- name: Ensure Swift ring build directory exists
+  file:
+    path: "{{ swift_ring_build_path }}"
+    state: directory
+  delegate_to: "{{ swift_ring_build_host }}"
+  run_once: True
+
+- name: Ensure Swift rings are created
+  command: >
+    docker run
+      --rm
+      -v {{ swift_ring_build_path }}/:{{ kolla_config_path }}/config/swift/
+      {{ swift_image }}
+      swift-ring-builder {{ kolla_config_path }}/config/swift/{{ item }}.builder create
+        {{ swift_part_power }}
+        {{ swift_replication_count }}
+        {{ swift_min_part_hours }}
+  with_items: "{{ swift_service_names }}"
+  delegate_to: "{{ swift_ring_build_host }}"
+  run_once: True
+
+- name: Ensure devices are added to Swift rings
+  command: >
+    docker run
+      --rm
+      -v {{ swift_ring_build_path }}/:{{ kolla_config_path }}/config/swift/
+      {{ swift_image }}
+      swift-ring-builder {{ kolla_config_path }}/config/swift/{{ item[0] }}.builder add
+        --region {{ swift_region }}
+        --zone {{ swift_zone }}
+        --ip {{ internal_net_name | net_ip }}
+        --port {{ swift_service_ports[item[0]] }}
+        --device {{ item[1] }}
+        --weight 100
+  with_nested:
+    - "{{ swift_service_names }}"
+    - "{{ swift_block_devices }}"
+  delegate_to: "{{ swift_ring_build_host }}"
+
+- name: Ensure Swift rings are rebalanced
+  command: >
+    docker run
+      --rm
+      -v {{ swift_ring_build_path }}/:{{ kolla_config_path }}/config/swift/
+      {{ swift_image }}
+      swift-ring-builder {{ kolla_config_path }}/config/swift/{{ item }}.builder rebalance
+  with_items: "{{ swift_service_names }}"
+  delegate_to: "{{ swift_ring_build_host }}"
+  run_once: True
+
+- name: Ensure Swift ring files are copied
+  local_action:
+    module: copy
+    src: "{{ swift_ring_build_path }}/{{ item[0] }}.{{ item[1] }}"
+    dest: "{{ kolla_config_path }}/config/swift/{{ item[0] }}.{{ item[1] }}"
+    remote_src: True
+    owner: kolla
+    group: kolla
+    mode: 0644
+  with_nested:
+    - "{{ swift_service_names }}"
+    - - ring.gz
+      - builder
+  delegate_to: "{{ swift_ring_build_host }}"
+  become: True
+  run_once: True
+
+- name: Remove Swift ring build directory from build host
+  file:
+    path: "{{ swift_ring_build_path }}"
+    state: absent
+  delegate_to: "{{ swift_ring_build_host }}"
+  become: True
+  run_once: True
diff --git a/ansible/roles/swift-setup/vars/main.yml b/ansible/roles/swift-setup/vars/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..c6b67fbc8b3253cba149b1a0523fe8f1b71f2078
--- /dev/null
+++ b/ansible/roles/swift-setup/vars/main.yml
@@ -0,0 +1,6 @@
+---
+# List of names of Swift services.
+swift_service_names:
+  - object
+  - account
+  - container
diff --git a/ansible/roles/veth/defaults/main.yml b/ansible/roles/veth/defaults/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..e72b743416415d27845db78e37c31639efbf69dd
--- /dev/null
+++ b/ansible/roles/veth/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+# List of virtual interfaces to configure.
+veth_interfaces: []
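+# Each interface is a dict using the keys consumed by this role's templates,
+# for example (illustrative values):
+#veth_interfaces:
+#  - device: veth0
+#    bootproto: static
+#    bridge: br0
+#    peer_device: veth0p
+#    peer_bootproto: static
+#    onboot: yes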
diff --git a/ansible/roles/veth/files/ifdown-veth b/ansible/roles/veth/files/ifdown-veth
new file mode 100755
index 0000000000000000000000000000000000000000..4b65815ad9c6ce46f42705845230345d7f0f31e6
--- /dev/null
+++ b/ansible/roles/veth/files/ifdown-veth
@@ -0,0 +1,19 @@
+#!/bin/bash
+
+# Take down a virtual ethernet device pair.
+
+. /etc/init.d/functions
+
+cd /etc/sysconfig/network-scripts
+. ./network-functions
+
+[ -f ../network ] && . ../network
+
+CONFIG=${1}
+
+need_config "${CONFIG}"
+
+source_config
+
+./ifdown-eth ${CONFIG} ${2}
+./ifdown-eth ifcfg-${PEER_DEVICE} ${2}
diff --git a/ansible/roles/veth/files/ifup-veth b/ansible/roles/veth/files/ifup-veth
new file mode 100755
index 0000000000000000000000000000000000000000..9007cd1dfe3d79b15f18e9ead020dc523d01d81d
--- /dev/null
+++ b/ansible/roles/veth/files/ifup-veth
@@ -0,0 +1,28 @@
+#!/bin/bash
+
+# Bring up a virtual ethernet device pair.
+
+. /etc/init.d/functions
+
+cd /etc/sysconfig/network-scripts
+. ./network-functions
+
+[ -f ../network ] && . ../network
+
+CONFIG=${1}
+
+need_config "${CONFIG}"
+
+source_config
+
+if ! ip link show dev ${DEVICE} >/dev/null 2>&1 ; then
+    echo "Creating veth pair ${DEVICE} - ${PEER_DEVICE}"
+    ip link add dev ${DEVICE} type veth peer name ${PEER_DEVICE}
+    if [[ $? -ne 0 ]]; then
+        echo "Failed creating veth pair"
+        exit 1
+    fi
+fi
+
+./ifup-eth ${CONFIG} ${2}
+./ifup-eth ifcfg-${PEER_DEVICE} ${2}
diff --git a/ansible/roles/veth/tasks/main.yml b/ansible/roles/veth/tasks/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..d96c5701da9136fc0c6e55461608b2d5905fa78b
--- /dev/null
+++ b/ansible/roles/veth/tasks/main.yml
@@ -0,0 +1,43 @@
+---
+- name: Ensure veth sysconfig network control scripts exist
+  copy:
+    src: "{{ item }}"
+    dest: "{{ network_scripts_dir }}/{{ item }}"
+    owner: root
+    group: root
+    mode: 0755
+  with_items:
+    - "ifup-veth"
+    - "ifdown-veth"
+  become: True
+  register: ctl_result
+
+- name: Ensure veth sysconfig network interface files exist
+  template:
+    src: ifcfg-veth.j2
+    dest: "{{ network_scripts_dir }}/ifcfg-{{ item.device }}"
+    owner: root
+    group: root
+    mode: 0755
+  with_items: "{{ veth_interfaces }}"
+  become: True
+  register: veth_result
+
+- name: Ensure veth peer sysconfig network interface files exist
+  template:
+    src: ifcfg-peer.j2
+    dest: "{{ network_scripts_dir }}/ifcfg-{{ item.peer_device }}"
+    owner: root
+    group: root
+    mode: 0755
+  with_items: "{{ veth_interfaces }}"
+  become: True
+  register: peer_result
+
+- name: Bounce veth interfaces
+  shell: ifdown {{ item[0].item.device }} ; ifup {{ item[0].item.device }}
+  with_together:
+    - "{{ veth_result.results }}"
+    - "{{ peer_result.results }}"
+  when: "{{ ctl_result|changed or item[0]|changed or item[1]|changed }}"
+  become: True
diff --git a/ansible/roles/veth/templates/ifcfg-peer.j2 b/ansible/roles/veth/templates/ifcfg-peer.j2
new file mode 100644
index 0000000000000000000000000000000000000000..dfa818c25dd8e013a2d565f0b421f192282ad98b
--- /dev/null
+++ b/ansible/roles/veth/templates/ifcfg-peer.j2
@@ -0,0 +1,25 @@
+DEVICE={{ item.peer_device }}
+{% if item.peer_bootproto == 'static' %}
+BOOTPROTO=none
+{% if item.peer_address is defined %}
+IPADDR={{ item.peer_address }}
+{% endif %}
+{% if item.peer_netmask is defined %}
+NETMASK={{ item.peer_netmask }}
+{% endif %}
+{% if item.peer_gateway is defined %}
+GATEWAY={{ item.peer_gateway }}
+{% endif %}
+{% endif %}
+{% if item.peer_bootproto == 'dhcp' %}
+BOOTPROTO=dhcp
+{% endif %}
+{% if item.onboot is defined %}
+ONBOOT={{ item.onboot }}
+{% endif %}
+{% if item.peer_bridge is defined %}
+BRIDGE={{ item.peer_bridge }}
+{% endif %}
+{% if ansible_distribution_major_version | int >= 7 %}
+NM_CONTROLLED=no
+{% endif %}
diff --git a/ansible/roles/veth/templates/ifcfg-veth.j2 b/ansible/roles/veth/templates/ifcfg-veth.j2
new file mode 100644
index 0000000000000000000000000000000000000000..1f7e517f0233f55f90a5c2a679649c39bf3abcdd
--- /dev/null
+++ b/ansible/roles/veth/templates/ifcfg-veth.j2
@@ -0,0 +1,27 @@
+DEVICE={{ item.device }}
+TYPE=veth
+PEER_DEVICE={{ item.peer_device }}
+{% if item.bootproto == 'static' %}
+BOOTPROTO=none
+{% if item.address is defined %}
+IPADDR={{ item.address }}
+{% endif %}
+{% if item.netmask is defined %}
+NETMASK={{ item.netmask }}
+{% endif %}
+{% if item.gateway is defined %}
+GATEWAY={{ item.gateway }}
+{% endif %}
+{% endif %}
+{% if item.bootproto == 'dhcp' %}
+BOOTPROTO=dhcp
+{% endif %}
+{% if item.onboot is defined %}
+ONBOOT={{ item.onboot }}
+{% endif %}
+{% if item.bridge is defined %}
+BRIDGE={{ item.bridge }}
+{% endif %}
+{% if ansible_distribution_major_version | int >= 7 %}
+NM_CONTROLLED=no
+{% endif %}
diff --git a/ansible/roles/veth/vars/main.yml b/ansible/roles/veth/vars/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..179bad08d01c053e1bf4fa659a2ab900dce2f195
--- /dev/null
+++ b/ansible/roles/veth/vars/main.yml
@@ -0,0 +1,2 @@
+---
+network_scripts_dir: "/etc/sysconfig/network-scripts"
diff --git a/ansible/seed-vm.yml b/ansible/seed-vm.yml
new file mode 100644
index 0000000000000000000000000000000000000000..a75ac4548a79d74cf884581eb05badf5fcd902aa
--- /dev/null
+++ b/ansible/seed-vm.yml
@@ -0,0 +1,77 @@
+---
+- name: Ensure that the seed VM configdrive exists
+  hosts: seed-hypervisor
+  vars:
+    seed_host: "{{ groups['seed'][0] }}"
+    seed_hostvars: "{{ hostvars[seed_host] }}"
+  pre_tasks:
+    - name: Ensure the image cache directory exists
+      file:
+        path: "{{ image_cache_path }}"
+        state: directory
+
+  roles:
+    - role: jriguera.configdrive
+      # For now assume the VM OS family is the same as the hypervisor's.
+      configdrive_os_family: "{{ ansible_os_family }}"
+      configdrive_uuid: "{{ seed_host | to_uuid }}"
+      configdrive_fqdn: "{{ seed_host }}"
+      configdrive_name: "{{ seed_host }}"
+      configdrive_ssh_public_key: "{{ lookup('file', ansible_user_dir ~ '/.ssh/id_rsa.pub') }}"
+      configdrive_config_dir: "{{ image_cache_path }}"
+      configdrive_volume_path: "{{ image_cache_path }}"
+      configdrive_config_dir_delete: True
+      configdrive_resolv:
+        domain: "{{ seed_hostvars.resolv_domain | default }}"
+        search: "{{ seed_hostvars.resolv_search | default }}"
+        dns: "{{ seed_hostvars.resolv_nameservers | default([]) }}"
+      configdrive_network_device_list: >
+        {{ seed_hostvars.seed_vm_interfaces |
+           map(attribute='net_name') |
+           map('net_configdrive_network_device', seed_host) |
+           list }}
+
+  tasks:
+    - name: Set a fact containing the configdrive image path
+      set_fact:
+        seed_vm_configdrive_path: "{{ image_cache_path }}/{{ seed_host }}.iso"
+
+    - name: Ensure configdrive is decoded and decompressed
+      shell: >
+          base64 -d {{ image_cache_path }}/{{ seed_host | to_uuid }}.gz
+          | gunzip
+          > {{ seed_vm_configdrive_path }}
+
+    - name: Ensure compressed configdrive is removed
+      file:
+        path: "{{ image_cache_path }}/{{ seed_host | to_uuid }}.gz"
+        state: absent
+
+- name: Ensure that the seed VM is provisioned
+  hosts: seed-hypervisor
+  vars:
+    seed_host: "{{ groups['seed'][0] }}"
+    seed_hostvars: "{{ hostvars[seed_host] }}"
+  pre_tasks:
+    - name: Check the size of the configdrive
+      stat:
+        path: "{{ seed_vm_configdrive_path }}"
+      register: stat_result
+
+  roles:
+    - role: libvirt-vm
+      seed_vm_configdrive_volume:
+        name: "{{ seed_hostvars.seed_vm_name }}-configdrive"
+        pool: "{{ seed_hostvars.seed_vm_pool }}"
+        # Round size up to next multiple of 4096.
+        capacity: "{{ (stat_result.stat.size + 4095) // 4096 * 4096 }}"
+        device: "cdrom"
+        format: "raw"
+        image: "{{ seed_vm_configdrive_path }}"
+      libvirt_vm_name: "{{ seed_hostvars.seed_vm_name }}"
+      libvirt_vm_memory_mb: "{{ seed_hostvars.seed_vm_memory_mb }}"
+      libvirt_vm_vcpus: "{{ seed_hostvars.seed_vm_vcpus }}"
+      libvirt_vm_volumes: "{{ seed_hostvars.seed_vm_volumes + [seed_vm_configdrive_volume] }}"
+      libvirt_vm_interfaces: "{{ seed_hostvars.seed_vm_interfaces }}"
+      libvirt_vm_image_cache_path: "{{ image_cache_path }}"
+      become: True
diff --git a/ansible/ssh-known-host.yml b/ansible/ssh-known-host.yml
new file mode 100644
index 0000000000000000000000000000000000000000..a3f6e4b725d2381ca75505a23f8ba2ee0f185ddf
--- /dev/null
+++ b/ansible/ssh-known-host.yml
@@ -0,0 +1,7 @@
+---
+- name: Ensure known hosts are configured
+  hosts: all
+  gather_facts: no
+  roles:
+    - role: ssh-known-host
+
diff --git a/ansible/swift-setup.yml b/ansible/swift-setup.yml
new file mode 100644
index 0000000000000000000000000000000000000000..01e31c03f698c6acc20d91a2d057dd426214b84b
--- /dev/null
+++ b/ansible/swift-setup.yml
@@ -0,0 +1,11 @@
+---
+- hosts: controllers
+  roles:
+    - role: swift-setup
+      swift_image: "kolla/{{ kolla_base_distro }}-{{ kolla_install_type }}-swift-base:{{ kolla_openstack_release }}"
+      swift_ring_build_host: "{{ groups['controllers'][0] }}"
+      # ID of the region for this Swift service.
+      swift_region: 1
+      # ID of the zone for this Swift service.
+      swift_zone: "{{ groups['controllers'].index(inventory_hostname) % swift_num_zones }}"
+      when: "{{ kolla_enable_swift | bool }}"
diff --git a/ansible/test-image-centos-cloud.yml b/ansible/test-image-centos-cloud.yml
new file mode 100644
index 0000000000000000000000000000000000000000..8ad51a64598aa41cfcd9b889d05061f936bcb042
--- /dev/null
+++ b/ansible/test-image-centos-cloud.yml
@@ -0,0 +1,35 @@
+---
+- hosts: seed[0]
+  vars:
+    openstack_auth:
+      auth_url: "{{ lookup('env', 'OS_AUTH_URL') }}"
+      username: "{{ lookup('env', 'OS_USERNAME') }}"
+      password: "{{ lookup('env', 'OS_PASSWORD') }}"
+      project_name: "{{ lookup('env', 'OS_PROJECT_NAME') }}"
+      project_domain_name: "{{ lookup('env', 'OS_PROJECT_DOMAIN_NAME') }}"
+      user_domain_name: "{{ lookup('env', 'OS_USER_DOMAIN_NAME') }}"
+    image_download_dir: "{{ ansible_user_dir }}/images"
+  tasks:
+    - name: Ensure OpenStack shade module is installed
+      pip:
+        name: shade
+      become: True
+
+    - name: Ensure image download directory exists
+      file:
+        path: "{{ image_download_dir }}"
+        state: directory
+
+    - name: Ensure CentOS 7 cloud image is downloaded
+      get_url:
+        url: http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2
+        dest: "{{ image_download_dir }}/CentOS-7-x86_64-GenericCloud.qcow2"
+
+    - name: Ensure test deployment image is registered with Glance
+      os_image:
+        auth: "{{ openstack_auth }}"
+        name: centos7
+        container_format: bare
+        disk_format: qcow2
+        state: present
+        filename: "{{ image_download_dir }}/CentOS-7-x86_64-GenericCloud.qcow2"
diff --git a/ansible/test-image.yml b/ansible/test-image.yml
new file mode 100644
index 0000000000000000000000000000000000000000..27e3f2fa39efa721bf0f6f8ace7b83bb47e6b15d
--- /dev/null
+++ b/ansible/test-image.yml
@@ -0,0 +1,87 @@
+---
+- name: Ensure user images are built and registered with Glance
+  hosts: seed[0]
+  vars:
+    openstack_auth:
+      auth_url: "{{ lookup('env', 'OS_AUTH_URL') }}"
+      username: "{{ lookup('env', 'OS_USERNAME') }}"
+      password: "{{ lookup('env', 'OS_PASSWORD') }}"
+      project_name: "{{ lookup('env', 'OS_PROJECT_NAME') }}"
+      project_domain_name: "{{ lookup('env', 'OS_PROJECT_DOMAIN_NAME') }}"
+      user_domain_name: "{{ lookup('env', 'OS_USER_DOMAIN_NAME') }}"
+    image_build_dir: "{{ ansible_user_dir }}/images/dib"
+    image_name: centos7
+    image_os_element: centos7
+    image_base_elements:
+      - dhcp-all-interfaces
+    image_is_whole_disk: True
+    image_whole_disk_elements:
+      - vm
+    image_partition_elements:
+      - baremetal
+      - grub2
+    image_extra_elements: []
+    image_elements: "{{ image_base_elements + (image_whole_disk_elements if image_is_whole_disk|bool else image_partition_elements) + image_extra_elements }}"
+  tasks:
+    - name: Ensure diskimage-builder package is installed
+      yum:
+        name: diskimage-builder
+        state: installed
+      become: True
+
+    - name: Ensure OpenStack shade module is installed
+      pip:
+        name: shade
+      become: True
+
+    - name: Ensure image build directory exists
+      file:
+        path: "{{ image_build_dir }}"
+        state: directory
+
+    - name: Display image elements
+      debug:
+        var: image_elements
+
+    - name: Ensure CentOS 7 image is built
+      command: >
+        disk-image-create
+        {{ image_os_element }}
+        {{ image_elements|join(' ') }}
+        -o {{ image_name }}
+      args:
+        chdir: "{{ image_build_dir }}"
+        creates: "{{ image_build_dir }}/{{ image_name }}.qcow2"
+
+    - name: Ensure test deployment ramdisk and kernel images are registered with Glance
+      os_image:
+        auth: "{{ openstack_auth }}"
+        name: "{{ image_name }}.{{ item.ext }}"
+        container_format: "{{ item.container_format }}"
+        disk_format: "{{ item.disk_format }}"
+        filename: "{{ image_build_dir }}/{{ image_name }}.{{ item.ext }}"
+        state: present
+      with_items:
+        - { container_format: ari, disk_format: ari, ext: initrd }
+        - { container_format: aki, disk_format: aki, ext: vmlinuz }
+      register: r_and_k_result
+      when: not image_is_whole_disk|bool
+
+    - name: Ensure test deployment image is registered with Glance
+      os_image:
+        auth: "{{ openstack_auth }}"
+        name: "{{ image_name }}"
+        container_format: bare
+        disk_format: qcow2
+        filename: "{{ image_build_dir }}/{{ image_name }}.qcow2"
+        state: present
+
+    # FIXME: This does not seem to work :(
+    - name: Ensure test deployment image has kernel and ramdisk properties
+      os_image:
+        auth: "{{ openstack_auth }}"
+        name: "{{ image_name }}"
+        ramdisk: "{{ image_name }}.initrd"
+        kernel: "{{ image_name }}.vmlinuz"
+        state: present
+      when: not image_is_whole_disk|bool
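+
+# Extra DIB elements can be appended at run time via image_extra_elements.
+# A sketch, assuming the diskimage-builder epel element and an inventory at
+# /etc/kayobe/inventory:
+#   ansible-playbook -i /etc/kayobe/inventory ansible/test-image.yml \
+#     -e '{"image_extra_elements": ["epel"]}'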
diff --git a/ansible/test-infra.yml b/ansible/test-infra.yml
new file mode 100644
index 0000000000000000000000000000000000000000..89498b2fda10a72b70d40cd12d1220e37f66ed74
--- /dev/null
+++ b/ansible/test-infra.yml
@@ -0,0 +1,58 @@
+---
+- hosts: seed[0]
+  vars:
+    openstack_auth:
+      auth_url: "{{ lookup('env', 'OS_AUTH_URL') }}"
+      username: "{{ lookup('env', 'OS_USERNAME') }}"
+      password: "{{ lookup('env', 'OS_PASSWORD') }}"
+      project_name: "{{ lookup('env', 'OS_PROJECT_NAME') }}"
+      project_domain_name: "{{ lookup('env', 'OS_PROJECT_DOMAIN_NAME') }}"
+      user_domain_name: "{{ lookup('env', 'OS_USER_DOMAIN_NAME') }}"
+    image_download_dir: "{{ ansible_user_dir }}/images"
+  tasks:
+    - name: Ensure OpenStack shade module is installed
+      pip:
+        name: shade
+      become: True
+
+    - name: Ensure image download directory exists
+      file:
+        path: "{{ image_download_dir }}"
+        state: directory
+
+    - name: Ensure Ironic CoreOS IPA deploy images are downloaded
+      unarchive:
+        src: http://tarballs.openstack.org/ironic-python-agent/coreos/ipa-coreos-stable-newton.tar.gz
+        dest: "{{ image_download_dir }}"
+        remote_src: yes
+
+    - name: Ensure Ironic CoreOS IPA deploy images are registered with Glance
+      os_image:
+        auth: "{{ openstack_auth }}"
+        name: "{{ item.name }}"
+        container_format: "{{ item.format }}"
+        disk_format: "{{ item.format }}"
+        state: present
+        filename: "{{ image_download_dir }}/imagebuild/coreos/UPLOAD/{{ item.filename }}"
+      with_items:
+        - { name: ipa.initrd, filename: coreos_production_pxe_image-oem-stable-newton.cpio.gz, format: ari }
+        - { name: ipa.vmlinuz, filename: coreos_production_pxe-stable-newton.vmlinuz, format: aki }
+
+    - name: Ensure provisioning network is registered with Neutron
+      os_network:
+        auth: "{{ openstack_auth }}"
+        name: provision-net
+        provider_network_type: flat
+        provider_physical_network: physnet1
+        shared: True
+        state: present
+
+    - name: Ensure provisioning subnet is registered with Neutron
+      os_subnet:
+        auth: "{{ openstack_auth }}"
+        name: provision-subnet
+        network_name: provision-net
+        cidr: "{{ provision_net_cidr }}"
+        allocation_pool_start: "{{ provision_net_allocation_pool_start | default(omit) }}"
+        allocation_pool_end: "{{ provision_net_allocation_pool_end | default(omit) }}"
+        state: present
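+
+# provision_net_cidr (and optionally the allocation pool bounds) is not defined
+# in this play, so it must be supplied via the inventory or extra vars. A
+# sketch with example addresses:
+#   ansible-playbook -i /etc/kayobe/inventory ansible/test-infra.yml \
+#     -e provision_net_cidr=10.0.0.0/24 \
+#     -e provision_net_allocation_pool_start=10.0.0.10 \
+#     -e provision_net_allocation_pool_end=10.0.0.100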
diff --git a/ansible/test-keypair.yml b/ansible/test-keypair.yml
new file mode 100644
index 0000000000000000000000000000000000000000..ee001cb6bd6f9c7698eb2d4eb3951c87f9e70839
--- /dev/null
+++ b/ansible/test-keypair.yml
@@ -0,0 +1,24 @@
+---
+- name: Ensure user SSH keypair is registered with Nova
+  hosts: seed[0]
+  vars:
+    openstack_auth:
+      auth_url: "{{ lookup('env', 'OS_AUTH_URL') }}"
+      username: "{{ lookup('env', 'OS_USERNAME') }}"
+      password: "{{ lookup('env', 'OS_PASSWORD') }}"
+      project_name: "{{ lookup('env', 'OS_PROJECT_NAME') }}"
+      project_domain_name: "{{ lookup('env', 'OS_PROJECT_DOMAIN_NAME') }}"
+      user_domain_name: "{{ lookup('env', 'OS_USER_DOMAIN_NAME') }}"
+    public_key_file: "{{ ansible_user_dir }}/.ssh/id_rsa.pub"
+  tasks:
+    - name: Ensure OpenStack shade module is installed
+      pip:
+        name: shade
+      become: True
+
+    - name: Ensure a test SSH key pair is registered with Nova
+      os_keypair:
+        auth: "{{ openstack_auth }}"
+        name: test
+        public_key_file: "{{ public_key_file }}"
+        state: present
diff --git a/bootstrap.sh b/bootstrap.sh
new file mode 100755
index 0000000000000000000000000000000000000000..996a7d52b8b5d94475dbd64008e60d3a11884f87
--- /dev/null
+++ b/bootstrap.sh
@@ -0,0 +1,55 @@
+#!/bin/bash
+
+set -e
+
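+# Example invocation with a non-default configuration path (the path shown is
+# only an example; /etc/kayobe is the default used by run_playbook below):
+#   KAYOBE_CONFIG_PATH=$HOME/kayobe-config ./bootstrap.sh
+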
+function run_playbook {
+    KAYOBE_CONFIG_PATH=${KAYOBE_CONFIG_PATH:-/etc/kayobe}
+    # Ansible fails silently if the inventory does not exist.
+    test -e ${KAYOBE_CONFIG_PATH}/inventory
+    ansible-playbook \
+        -i ${KAYOBE_CONFIG_PATH}/inventory \
+        -e @${KAYOBE_CONFIG_PATH}/globals.yml \
+        -e @${KAYOBE_CONFIG_PATH}/dns.yml \
+        -e @${KAYOBE_CONFIG_PATH}/kolla.yml \
+        -e @${KAYOBE_CONFIG_PATH}/networks.yml \
+        -e @${KAYOBE_CONFIG_PATH}/network-allocation.yml \
+        -e @${KAYOBE_CONFIG_PATH}/ntp.yml \
+        -e @${KAYOBE_CONFIG_PATH}/swift.yml \
+        $@
+}
+
+function install_ansible {
+    if [[ -f /etc/centos-release ]]; then
+        sudo yum -y install epel-release
+    elif [[ -f /etc/redhat-release ]]; then
+        sudo subscription-manager repos --enable=qci-1.0-for-rhel-7-rpms
+        if ! yum info epel-release >/dev/null 2>&1 ; then
+            sudo yum -y install \
+                https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
+        fi
+    fi
+    sudo yum -y install ansible
+}
+
+function install_ansible_roles {
+    ansible-galaxy install \
+        --roles-path ansible/roles \
+        --role-file ansible/requirements.yml
+}
+
+function bootstrap {
+    run_playbook ansible/bootstrap.yml
+}
+
+function install_kolla {
+    run_playbook ansible/kolla.yml
+}
+
+function main {
+    install_ansible
+    install_ansible_roles
+    bootstrap
+    install_kolla
+}
+
+main $*
diff --git a/configure-kayobe.sh b/configure-kayobe.sh
new file mode 100755
index 0000000000000000000000000000000000000000..c11090943048897bcc577dd67c09c70b6283529b
--- /dev/null
+++ b/configure-kayobe.sh
@@ -0,0 +1,76 @@
+#!/bin/bash
+
+set -e
+
+function configure_kayobe {
+    KAYOBE_CONFIG_PATH=${KAYOBE_CONFIG_PATH:-/etc/kayobe}
+    sudo yum -y install python-netaddr
+    sudo mkdir -p ${KAYOBE_CONFIG_PATH}
+    sudo chown ${USER}:${USER} ${KAYOBE_CONFIG_PATH}
+    chmod 755 ${KAYOBE_CONFIG_PATH}
+    cp -r etc/kayobe/* ${KAYOBE_CONFIG_PATH}
+    my_interface=$(ip route get 8.8.8.8 | awk '{ print $5 }')
+    my_ip=$(ip route get 8.8.8.8 | awk '{ print $7 }')
+    gateway=$(ip route get 8.8.8.8 | awk '{ print $3 }')
+    cidr=$(ip a show $my_interface | awk '$1 == "inet" { print $2 }')
+    vip=$(python -c "import netaddr; a = netaddr.IPAddress('$my_ip'); print a+2")
+    cat >> ${KAYOBE_CONFIG_PATH}/ntp.yml << EOF
+
+#######################################################
+# Local config
+timezone: GMT
+EOF
+
+    cat >> ${KAYOBE_CONFIG_PATH}/networks.yml << EOF
+
+#######################################################
+# Local config
+provision_oc_net_name: 'the_net'
+provision_wl_net_name: 'the_net'
+internal_net_name: 'the_net'
+external_net_name: 'the_net'
+storage_net_name: 'the_net'
+storage_mgmt_net_name: 'the_net'
+
+the_net_vip_address: ${vip}
+the_net_cidr: ${cidr}
+the_net_gateway: ${gateway}
+EOF
+
+    cat > ${KAYOBE_CONFIG_PATH}/network-allocation.yml << EOF
+---
+the_net_ips:
+  localhost: ${my_ip}
+EOF
+
+    cat > ${KAYOBE_CONFIG_PATH}/inventory/hosts << EOF
+[config-mgmt]
+# This host acts as the configuration management control host. This must be
+# localhost.
+localhost ansible_connection=local
+
+[seed]
+# This host will provide the Bifrost undercloud.
+localhost ansible_host=127.0.0.1
+
+[controllers]
+# These hosts will provide the OpenStack overcloud.
+EOF
+
+    if [[ -e ~/kayobe-env ]] ; then
+        for controller_ip in $(python -c "import json
+with open('$HOME/kayobe-env') as f:
+    cfg = json.load(f)
+for ctl_ip in cfg['controller_ips']:
+    print ctl_ip"); do
+            echo "  '$controller_ip': $controller_ip" >> ${KAYOBE_CONFIG_PATH}/network-allocation.yml
+            echo $controller_ip >> ${KAYOBE_CONFIG_PATH}/inventory/hosts
+        done
+    fi
+}
+
+function main {
+    configure_kayobe
+}
+
+main $@
diff --git a/deploy-overcloud.sh b/deploy-overcloud.sh
index cc9bd7d9d2287aaf61cc88fc41d6df3214926056..92f62d3334029cd6b66f5f0ea8407398c3966ebf 100755
--- a/deploy-overcloud.sh
+++ b/deploy-overcloud.sh
@@ -2,68 +2,58 @@
 
 set -e
 
-###########################################################
-# Overcloud
-
-function configure_overcloud_network {
-    echo "TODO: configure overcloud network"
-}
-
-function configure_overcloud_bios_and_raid {
-    echo "TODO: configure overcloud BIOS and RAID"
-}
-
-function deploy_overcloud_servers {
-    # Deploy servers with Bifrost
-    kolla-ansible deploy-servers -i /etc/kolla/inventory/seed
-}
-
-function configure_overcloud_os {
-    #ansible controllers -b -i /etc/kolla/inventory/overcloud -m yum -a 'name=[epel-release, centos-release-openstack-newton]'
-    #ansible controllers -b -i /etc/kolla/inventory/overcloud -m yum -a 'name=[python-pip, vim]'
-
-    # Disable SELiunx
-    ansible controllers -b -i /etc/kolla/inventory/overcloud -m selinux -a 'state=disabled'
-    ansible controllers -b -i /etc/kolla/inventory/overcloud -m command -a 'reboot -f' &
-
-    # Wait for nodes to come back up
-    echo "Waiting for overcloud nodes to come back up"
-    while true ; do
-        ansible controllers -i /etc/kolla/inventory/overcloud -m command -a 'hostname' && break
-    done
-}
-
-function bootstrap_overcloud_kolla {
-    # TODO
-    # Bootstrap seed node
-    kolla-ansible bootstrap-servers -i /etc/kolla/inventory/overcloud
-    ansible controllers -i /etc/kolla/inventory/overcloud -m command -a 'docker ps'
-    ansible controllers -b -i /etc/kolla/inventory/overcloud -m service -a 'name=ntpd state=started enabled=yes'
+function run_playbook {
+    KAYOBE_CONFIG_PATH=${KAYOBE_CONFIG_PATH:-/etc/kayobe}
+    # Ansible fails silently if the inventory does not exist.
+    test -e ${KAYOBE_CONFIG_PATH}/inventory
+    ansible-playbook \
+        -i ${KAYOBE_CONFIG_PATH}/inventory \
+        -e @${KAYOBE_CONFIG_PATH}/globals.yml \
+        -e @${KAYOBE_CONFIG_PATH}/dns.yml \
+        -e @${KAYOBE_CONFIG_PATH}/kolla.yml \
+        -e @${KAYOBE_CONFIG_PATH}/networks.yml \
+        -e @${KAYOBE_CONFIG_PATH}/network-allocation.yml \
+        -e @${KAYOBE_CONFIG_PATH}/ntp.yml \
+        -e @${KAYOBE_CONFIG_PATH}/swift.yml \
+        $@
 }
 
-function configure_overcloud_docker {
-    echo "TODO: configure overcloud docker"
+function run_kolla_ansible {
+    export KOLLA_CONFIG_PATH=${KOLLA_CONFIG_PATH:-/etc/kolla}
+    # Ansible fails silently if the inventory does not exist.
+    test -e ${KOLLA_CONFIG_PATH}/inventory/overcloud
+    KOLLA_VENV=$(pwd)/ansible/kolla-venv
+    source ${KOLLA_VENV}/bin/activate
+    kolla-ansible \
+        --configdir ${KOLLA_CONFIG_PATH} \
+        --passwords ${KOLLA_CONFIG_PATH}/passwords.yml \
+        -i ${KOLLA_CONFIG_PATH}/inventory/overcloud \
+        $@
+    deactivate
 }
 
-function pull_overcloud_images {
-    kolla-ansible pull -i /etc/kolla/inventory/overcloud
+function configure_os {
+    run_playbook ansible/ssh-known-host.yml -l controllers
+    run_playbook ansible/disable-selinux.yml -l controllers
+    run_playbook ansible/network.yml -l controllers
+    run_playbook ansible/ntp.yml -l controllers
+    run_kolla_ansible bootstrap-servers -e ansible_user=${USER}
+    run_playbook ansible/kolla-host.yml -l controllers
+    run_playbook ansible/docker.yml -l controllers
 }
 
-function deploy_overcloud_services {
-    kolla-ansible prechecks -i /etc/kolla/inventory/overcloud
-    kolla-ansible deploy -i /etc/kolla/inventory/overcloud
-    kolla-ansible post-deploy -i /etc/kolla/inventory/overcloud
+function deploy_services {
+    run_playbook ansible/kolla-openstack.yml
+    run_playbook ansible/swift-setup.yml
+    run_kolla_ansible pull
+    run_kolla_ansible prechecks
+    run_kolla_ansible deploy
+    run_kolla_ansible post-deploy
 }
 
 function deploy_overcloud {
-    configure_overcloud_network
-    configure_overcloud_bios_and_raid
-    deploy_overcloud_servers
-    configure_overcloud_os
-    bootstrap_overcloud_kolla
-    configure_overcloud_docker
-    pull_overcloud_images
-    deploy_overcloud_services
+    configure_os
+    deploy_services
 }
 
 ###########################################################
diff --git a/deploy-seed.sh b/deploy-seed.sh
index 9830d6cc2f4ff9f847fb5fd9df97e6a4f3f873f5..321e503b7ffc3e24cdf7e8b51304c8a6c5f435bd 100755
--- a/deploy-seed.sh
+++ b/deploy-seed.sh
@@ -2,605 +2,58 @@
 
 set -e
 
-###########################################################
-# Seed node
-
-function configure_seed_os {
-    sudo yum -y install epel-release
-    sudo yum -y install ansible
-    sudo yum -y install git vim
-
-    # Generate an SSH key
-    if [[ ! -f ~/.ssh/id_rsa ]]; then
-        ssh-keygen -N '' -f ~/.ssh/id_rsa
-    fi
-    ansible localhost -m authorized_key -a "user=$(whoami) key='$(cat ~/.ssh/id_rsa.pub)'"
-    ssh-keyscan 127.0.0.1 >> ~/.ssh/known_hosts
-    ssh-keyscan localhost >> ~/.ssh/known_hosts
-
-    # Disable SELiunx
-    if selinuxenabled && [[ $(getenforce) = 'Enforcing' ]] ; then
-        echo "Disabling SELinux and rebooting. Re-run this script"
-        ansible localhost -b -m selinux -a 'state=disabled'
-        sudo reboot -f
-    fi
+function run_playbook {
+    KAYOBE_CONFIG_PATH=${KAYOBE_CONFIG_PATH:-/etc/kayobe}
+    # Ansible fails silently if the inventory does not exist.
+    test -e ${KAYOBE_CONFIG_PATH}/inventory
+    ansible-playbook \
+        -i ${KAYOBE_CONFIG_PATH}/inventory \
+        -e @${KAYOBE_CONFIG_PATH}/bifrost.yml \
+        -e @${KAYOBE_CONFIG_PATH}/dns.yml \
+        -e @${KAYOBE_CONFIG_PATH}/globals.yml \
+        -e @${KAYOBE_CONFIG_PATH}/kolla.yml \
+        -e @${KAYOBE_CONFIG_PATH}/networks.yml \
+        -e @${KAYOBE_CONFIG_PATH}/network-allocation.yml \
+        -e @${KAYOBE_CONFIG_PATH}/ntp.yml \
+        -e @${KAYOBE_CONFIG_PATH}/swift.yml \
+        $@
 }
 
-function install_kolla {
-    # Install kolla
-    sudo yum -y install gcc python-devel python-pip libffi-devel openssl-devel
-    #sudo yum -y install centos-release-openstack-newton
-    #sudo yum -y install python-openstackclient python-neutronclient
-
-    sudo pip install 'kolla<4.0.0'
-    set +e
-    sudo yum -y install patch
-    sudo patch -u -f /usr/share/kolla/ansible/roles/baremetal/tasks/pre-install.yml << EOF
---- /usr/share/kolla/ansible/roles/baremetal/tasks/pre-install.yml.old  2017-01-06 17:23:12.444746830 +0000
-+++ /usr/share/kolla/ansible/roles/baremetal/tasks/pre-install.yml      2017-01-06 17:22:27.864278879 +0000
-@@ -28,6 +28,7 @@
-           {% for host in groups['all'] %}
-           {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }} {{ hostvars[host]['ansible_hostname'] }}
-           {% endfor %}
-+  become: True
-   when: customize_etc_hosts | bool
- 
- - name: ensure sudo group is present
-@@ -126,7 +127,7 @@
-     recurse: yes
-     owner: kolla
-     group: kolla
--    mode: 755
-+    mode: 0755
-   become: True
-   when: create_kolla_user | bool == True
- 
-@@ -135,6 +136,6 @@
-     path: /etc/kolla
-     state: directory
-     recurse: yes
--    mode: 666
-+    mode: 0666
-   become: True
-   when: create_kolla_user | bool == False
-EOF
-    sudo patch -u -f /usr/share/kolla/ansible/roles/ironic/templates/ironic-api.json.j2 << EOF
---- /usr/share/kolla/ansible/roles/ironic/templates/ironic-api.json.j2.old      2017-01-06 13:56:52.881061188 +0000
-+++ /usr/share/kolla/ansible/roles/ironic/templates/ironic-api.json.j2  2017-01-06 14:00:21.757338271 +0000
-@@ -10,7 +10,7 @@
-     ],
-     "permissions": [
-         {
--            "path": "/var/log/kolla/ironic"
-+            "path": "/var/log/kolla/ironic",
-             "owner": "ironic:ironic",
-             "recurse": true
-         }
-EOF
-    sudo patch -u -f /usr/share/kolla/ansible/roles/ironic/templates/ironic-conductor.json.j2 << EOF
---- /usr/share/kolla/ansible/roles/ironic/templates/ironic-conductor.json.j2.old        2017-01-06 14:28:35.048365453 +0000
-+++ /usr/share/kolla/ansible/roles/ironic/templates/ironic-conductor.json.j2    2017-01-06 14:28:44.858467071 +0000
-@@ -20,7 +20,7 @@
-             "recurse": true
-         },
-         {
--            "path": "/tftpboot"
-+            "path": "/tftpboot",
-             "owner": "ironic:ironic",
-             "recurse": true
-         }
-EOF
-    set -e
+function run_kolla_ansible {
+    export KOLLA_CONFIG_PATH=${KOLLA_CONFIG_PATH:-/etc/kolla}
+    # Ansible fails silently if the inventory does not exist.
+    test -e ${KOLLA_CONFIG_PATH}/inventory/seed
+    KOLLA_VENV=$(pwd)/ansible/kolla-venv
+    source ${KOLLA_VENV}/bin/activate
+    kolla-ansible \
+        --configdir ${KOLLA_CONFIG_PATH} \
+        --passwords ${KOLLA_CONFIG_PATH}/passwords.yml \
+        -i ${KOLLA_CONFIG_PATH}/inventory/seed \
+        $@
+    deactivate
 }
 
-function configure_kolla {
-    # Configure Kolla
-    if [[ -d /etc/kolla ]]; then
-        sudo mv /etc/kolla /etc/kolla.old.$(date +%s)
-    fi
-    sudo mkdir -p /etc/kolla
-    sudo chown $(whoami):$(whoami) /etc/kolla
-    mkdir -p /etc/kolla/config /etc/kolla/inventory
-    cat > /etc/kolla/inventory/seed << EOF
-# Simple inventory for bootstrapping Kolla control host.
-[baremetal]
-seed      ansible_host=127.0.0.1 ansible_user=kolla
-EOF
-
-    cat > /etc/kolla/inventory/overcloud << EOF
-[controllers]
-# These hostname must be resolvable from your deployment host
-control01      ansible_host=${CONTROLLER_IP} ansible_user=kolla ansible_become=true
-
-# These initial groups are the only groups required to be modified. The
-# additional groups are for more control of the environment.
-[control:children]
-controllers
-
-# The network nodes are where your l3-agent and loadbalancers will run
-# This can be the same as a host in the control group
-[network:children]
-controllers
-
-[compute:children]
-controllers
-
-[monitoring:children]
-controllers
-
-[storage:children]
-controllers
-
-[baremetal:children]
-control
-network
-compute
-storage
-monitoring
-
-# You can explicitly specify which hosts run each project by updating the
-# groups in the sections below. Common services are grouped together.
-[collectd:children]
-compute
-
-[grafana:children]
-monitoring
-
-[etcd:children]
-control
-
-[influxdb:children]
-monitoring
-
-[kibana:children]
-control
-
-[telegraf:children]
-monitoring
-
-[elasticsearch:children]
-control
-
-[haproxy:children]
-network
-
-[mariadb:children]
-control
-
-[rabbitmq:children]
-control
-
-[mongodb:children]
-control
-
-[keystone:children]
-control
-
-[glance:children]
-control
-
-[nova:children]
-control
-
-[neutron:children]
-network
-
-[cinder:children]
-control
-
-[cloudkitty:children]
-control
-
-[memcached:children]
-control
-
-[horizon:children]
-control
-
-[swift:children]
-control
-
-[barbican:children]
-control
-
-[heat:children]
-control
-
-[murano:children]
-control
-
-[ironic:children]
-control
-
-[ceph:children]
-control
-
-[magnum:children]
-control
-
-[sahara:children]
-control
-
-[mistral:children]
-control
-
-[manila:children]
-control
-
-[ceilometer:children]
-control
-
-[aodh:children]
-control
-
-[congress:children]
-control
-
-[gnocchi:children]
-control
-
-# Tempest
-[tempest:children]
-control
-
-[senlin:children]
-control
-
-[vmtp:children]
-control
-
-[watcher:children]
-control
-
-[rally:children]
-control
-
-# Additional control implemented here. These groups allow you to control which
-# services run on which hosts at a per-service level.
-#
-# Word of caution: Some services are required to run on the same host to
-# function appropriately. For example, neutron-metadata-agent must run on the
-# same host as the l3-agent and (depending on configuration) the dhcp-agent.
-
-# Glance
-[glance-api:children]
-glance
-
-[glance-registry:children]
-glance
-
-# Nova
-[nova-api:children]
-nova
-
-[nova-conductor:children]
-nova
-
-[nova-consoleauth:children]
-nova
-
-[nova-novncproxy:children]
-nova
-
-[nova-scheduler:children]
-nova
-
-[nova-spicehtml5proxy:children]
-nova
-
-[nova-compute-ironic:children]
-nova
-
-# Neutron
-[neutron-server:children]
-control
-
-[neutron-dhcp-agent:children]
-neutron
-
-[neutron-l3-agent:children]
-neutron
-
-[neutron-lbaas-agent:children]
-neutron
-
-[neutron-metadata-agent:children]
-neutron
-
-[neutron-vpnaas-agent:children]
-neutron
-
-# Ceph
-[ceph-mon:children]
-ceph
-
-[ceph-rgw:children]
-ceph
-
-[ceph-osd:children]
-storage
-
-# Cinder
-[cinder-api:children]
-cinder
-
-[cinder-backup:children]
-storage
-
-[cinder-scheduler:children]
-cinder
-
-[cinder-volume:children]
-storage
-
-# Cloudkitty
-[cloudkitty-api:children]
-cloudkitty
-
-[cloudkitty-processor:children]
-cloudkitty
-
-# iSCSI
-[iscsid:children]
-compute
-storage
-ironic-conductor
-
-[tgtd:children]
-storage
-
-# Manila
-[manila-api:children]
-manila
-
-[manila-scheduler:children]
-manila
-
-[manila-share:children]
-network
-
-# Swift
-[swift-proxy-server:children]
-swift
-
-[swift-account-server:children]
-storage
-
-[swift-container-server:children]
-storage
-
-[swift-object-server:children]
-storage
-
-# Barbican
-[barbican-api:children]
-barbican
-
-[barbican-keystone-listener:children]
-barbican
-
-[barbican-worker:children]
-barbican
-
-# Heat
-[heat-api:children]
-heat
-
-[heat-api-cfn:children]
-heat
-
-[heat-engine:children]
-heat
-
-# Murano
-[murano-api:children]
-murano
-
-[murano-engine:children]
-murano
-
-# Ironic
-[ironic-api:children]
-ironic
-
-[ironic-conductor:children]
-ironic
-
-[ironic-inspector:children]
-ironic
-
-[ironic-pxe:children]
-ironic
-
-# Magnum
-[magnum-api:children]
-magnum
-
-[magnum-conductor:children]
-magnum
-
-# Sahara
-[sahara-api:children]
-sahara
-
-[sahara-engine:children]
-sahara
-
-# Mistral
-[mistral-api:children]
-mistral
-
-[mistral-executor:children]
-mistral
-
-[mistral-engine:children]
-mistral
-
-# Ceilometer
-[ceilometer-api:children]
-ceilometer
-
-[ceilometer-central:children]
-ceilometer
-
-[ceilometer-notification:children]
-ceilometer
-
-[ceilometer-collector:children]
-ceilometer
-
-[ceilometer-compute:children]
-compute
-
-# Aodh
-[aodh-api:children]
-aodh
-
-[aodh-evaluator:children]
-aodh
-
-[aodh-listener:children]
-aodh
-
-[aodh-notifier:children]
-aodh
-
-# Congress
-[congress-api:children]
-congress
-
-[congress-datasource:children]
-congress
-
-[congress-policy-engine:children]
-congress
-
-# Gnocchi
-[gnocchi-api:children]
-gnocchi
-
-[gnocchi-statsd:children]
-gnocchi
-
-[gnocchi-metricd:children]
-gnocchi
-
-# Multipathd
-[multipathd:children]
-compute
-
-# Watcher
-[watcher-api:children]
-watcher
-
-[watcher-engine:children]
-watcher
-
-[watcher-applier:children]
-watcher
-
-# Senlin
-[senlin-api:children]
-senlin
-
-[senlin-engine:children]
-senlin
-EOF
-
-    my_ip=$(ip route get 192.168.0.1 | awk '{ print $5 }')
-    vip=$(python -c "import netaddr; a = netaddr.IPAddress('$my_ip'); print a+1")
-    my_intf=$(ip route get 192.168.0.1 | awk '{ print $3 }')
-
-    cp /usr/share/kolla/etc_examples/kolla/* /etc/kolla
-    cat >> /etc/kolla/globals.yml << EOF
-##################################################
-# Begin overrides
-##################################################
-
-# OpenStack distro
-kolla_base_distro: "centos"
-kolla_install_type: "binary"
-openstack_release: "3.0.1"
-
-# Networking
-kolla_internal_vip_address: "${vip}"
-network_interface: "${my_intf}"
-
-# TLS
-#kolla_enable_tls_external: "no"
-#kolla_external_fqdn_cert: "{{ node_config_directory }}/certificates/haproxy.pem"
-
-# Services
-enable_ironic: "yes"
-EOF
-
-    # Generate passwords
-    kolla-genpwd
-
-    # Configure Kolla build
-    cat > /etc/kolla/template-override.j2 << EOF
-{% extends parent_template %}
-
-# Disable troublesome keys
-{% set base_yum_repo_keys_override=['http://yum.mariadb.org/RPM-GPG-KEY-MariaDB'] %}
-# Disable repos with troublesome keys
-{% set base_yum_repo_files_override=['MariaDB.repo'] %}
-EOF
-    cat > /etc/kolla/kolla-build.conf << EOF
-[DEFAULT]
-template_override=/etc/kolla/template-override.j2
-EOF
-
-    # Configure Bifrost
-    mkdir /etc/kolla/config/bifrost
-    cat > /etc/kolla/config/bifrost/bifrost.yml << EOF
----
-EOF
-    cat > /etc/kolla/config/bifrost/dib.yml << EOF
----
-dib_os_element: "centos7"
-EOF
-    cat > /etc/kolla/config/bifrost/servers.yml << EOF
----
-EOF
-}
-
-function bootstrap_seed_kolla {
-    # Bootstrap seed node
-    kolla-ansible bootstrap-servers -i /etc/kolla/inventory/seed -e ansible_user=$(whoami)
-    ansible seed -i /etc/kolla/inventory/seed -b -m authorized_key -a "user=kolla key='$(cat ~/.ssh/id_rsa.pub)'" -e ansible_user=$(whoami)
-    ansible seed -i /etc/kolla/inventory/seed -b -m user -a "name=$(whoami) groups=kolla,docker append=true"
-    ansible seed -i /etc/kolla/inventory/seed -m command -a 'docker info'
-    # Enable NTPd
-    ansible seed -i /etc/kolla/inventory/seed -b -m service -a 'name=ntpd state=started enabled=yes'
-}
-
-function configure_seed_docker {
-    # TODO
-    echo "TODO: configure docker on seed"
+function configure_os {
+    run_playbook ansible/ssh-known-host.yml -l seed
+    run_playbook ansible/disable-selinux.yml -l seed
+    run_playbook ansible/network.yml -l seed
+    run_playbook ansible/ntp.yml -l seed
+    run_kolla_ansible bootstrap-servers -e ansible_user=${USER}
+    run_playbook ansible/kolla-host.yml -l seed
+    run_playbook ansible/docker.yml -l seed
 }
 
 function deploy_bifrost {
-    if true ; then
-        # Build Bifrost image
-        # FIXME: sudo required because we need to log out/in for docker group
-        # membership to take effect.
-        sudo kolla-build -t source bifrost-deploy
-    else
-        # Image on Dockerhub not currently working :(
-        docker pull docker.io/kolla/centos-source-bifrost-deploy:3.0.1
-    fi
-
-    # Deploy Bifrost
-    kolla-ansible deploy-bifrost -i /etc/kolla/inventory/seed -e kolla_install_type=source
+    # Use a pre-built bifrost image in the stackhpc repository.
+    # The image was built via kolla-build -t source bifrost-deploy.
+    run_playbook ansible/kolla-bifrost.yml
+    run_kolla_ansible deploy-bifrost \
+      -e kolla_install_type=source \
+      -e docker_namespace=stackhpc
 }
 
 function deploy_seed_node {
-    configure_seed_os
-    install_kolla
-    configure_kolla
-    bootstrap_seed_kolla
-    configure_seed_docker
+    configure_os
     deploy_bifrost
 }
 
@@ -608,11 +61,6 @@ function deploy_seed_node {
 # Main
 
 function main {
-    if [[ $# -ne 1 ]]; then
-        echo "Usage: $0 <controller IP>"
-        exit 1
-    fi
-    CONTROLLER_IP=$1
     deploy_seed_node
 }
 
diff --git a/etc/kayobe/bifrost.yml b/etc/kayobe/bifrost.yml
new file mode 100644
index 0000000000000000000000000000000000000000..d88eeabd81200f864107d30ac2ba9d920cc113cb
--- /dev/null
+++ b/etc/kayobe/bifrost.yml
@@ -0,0 +1,36 @@
+---
+# Kayobe configuration for Bifrost.
+
+###############################################################################
+# Diskimage-builder configuration.
+
+# DIB base OS element.
+#kolla_bifrost_dib_os_element:
+
+# List of DIB elements.
+#kolla_bifrost_dib_elements:
+
+# DIB init element.
+#kolla_bifrost_dib_init_element:
+
+# DIB environment variables.
+#kolla_bifrost_dib_env_vars:
+
+# List of DIB packages to install.
+#kolla_bifrost_dib_packages:
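+
+# Example values (the centos7 OS element mirrors the dib_os_element previously
+# hard-coded in deploy-seed.sh; the element list is illustrative only):
+#   kolla_bifrost_dib_os_element: "centos7"
+#   kolla_bifrost_dib_elements:
+#     - "vm"
+#     - "serial-console"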
+
+###############################################################################
+# Ironic configuration.
+
+# Whether to enable ipmitool-based drivers.
+#kolla_bifrost_enable_ipmitool_drivers:
+
+###############################################################################
+# Inventory configuration.
+
+# Server inventory for Bifrost.
+#kolla_bifrost_servers:
+
+###############################################################################
+# Dummy variable to allow Ansible to accept this file.
+workaround_ansible_issue_8743: yes
diff --git a/etc/kayobe/dns.yml b/etc/kayobe/dns.yml
new file mode 100644
index 0000000000000000000000000000000000000000..46c48fb831cd092afb22f6860d5877c25d15c922
--- /dev/null
+++ b/etc/kayobe/dns.yml
@@ -0,0 +1,23 @@
+---
+###############################################################################
+# DNS.
+
+# List of DNS nameservers.
+#resolv_nameservers:
+
+# DNS domain suffix.
+#resolv_domain:
+
+# List of DNS search suffixes.
+#resolv_search:
+
+# List of IP address and netmask pairs to sort addresses returned by
+# gethostbyname.
+#resolv_sortlist:
+
+# List of DNS options.
+#resolv_options:
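+
+# Example (the addresses are the public Google resolvers, shown purely as an
+# illustration):
+#   resolv_nameservers:
+#     - 8.8.8.8
+#     - 8.8.4.4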
+
+###############################################################################
+# Dummy variable to allow Ansible to accept this file.
+workaround_ansible_issue_8743: yes
diff --git a/etc/kayobe/globals.yml b/etc/kayobe/globals.yml
new file mode 100644
index 0000000000000000000000000000000000000000..68756cb4248ed519f1c62902bf87a4b6178a6e4b
--- /dev/null
+++ b/etc/kayobe/globals.yml
@@ -0,0 +1,15 @@
+---
+# Kayobe global configuration.
+
+###############################################################################
+# Miscellaneous configuration.
+
+# Path to Kayobe configuration.
+#kayobe_config_path:
+
+# Path in which to cache downloaded images.
+#image_cache_path:
+
+###############################################################################
+# Dummy variable to allow Ansible to accept this file.
+workaround_ansible_issue_8743: yes
diff --git a/etc/kayobe/inventory/group_vars/controllers/network-interfaces b/etc/kayobe/inventory/group_vars/controllers/network-interfaces
new file mode 100644
index 0000000000000000000000000000000000000000..eb774eec8792c181478ee8e9bf3ffcd29e0eca0e
--- /dev/null
+++ b/etc/kayobe/inventory/group_vars/controllers/network-interfaces
@@ -0,0 +1,31 @@
+---
+###############################################################################
+# Network interface definitions for the controller group.
+
+# Overcloud provisioning network IP information.
+# provision_oc_net_interface:
+# provision_oc_net_bridge_ports:
+
+# Workload provisioning network IP information.
+# provision_wl_net_interface:
+# provision_wl_net_bridge_ports:
+
+# Internal network IP information.
+# internal_net_interface:
+# internal_net_bridge_ports:
+
+# External network IP information.
+# external_net_interface:
+# external_net_bridge_ports:
+
+# Storage network IP information.
+# storage_net_interface:
+# storage_net_bridge_ports:
+
+# Storage management network IP information.
+# storage_mgmt_net_interface:
+# storage_mgmt_net_bridge_ports:
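+
+# Example for a bridged internal network (interface and port names are
+# hypothetical):
+#   internal_net_interface: 'breth1'
+#   internal_net_bridge_ports:
+#     - 'eth1'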
+
+###############################################################################
+# Dummy variable to allow Ansible to accept this file.
+workaround_ansible_issue_8743: yes
diff --git a/etc/kayobe/inventory/group_vars/seed/network-interfaces b/etc/kayobe/inventory/group_vars/seed/network-interfaces
new file mode 100644
index 0000000000000000000000000000000000000000..628d2de761657fde94f455254690b3f81fa5b469
--- /dev/null
+++ b/etc/kayobe/inventory/group_vars/seed/network-interfaces
@@ -0,0 +1,31 @@
+---
+###############################################################################
+# Network interface definitions for the seed group.
+
+# Overcloud provisioning network IP information.
+# provision_oc_net_interface:
+# provision_oc_net_bridge_ports:
+
+# Workload provisioning network IP information.
+# provision_wl_net_interface:
+# provision_wl_net_bridge_ports:
+
+# Internal network IP information.
+# internal_net_interface:
+# internal_net_bridge_ports:
+
+# External network IP information.
+# external_net_interface:
+# external_net_bridge_ports:
+
+# Storage network IP information.
+# storage_net_interface:
+# storage_net_bridge_ports:
+
+# Storage management network IP information.
+# storage_mgmt_net_interface:
+# storage_mgmt_net_bridge_ports:
+
+###############################################################################
+# Dummy variable to allow Ansible to accept this file.
+workaround_ansible_issue_8743: yes
diff --git a/etc/kayobe/inventory/groups b/etc/kayobe/inventory/groups
new file mode 100644
index 0000000000000000000000000000000000000000..15f9ea96d26d214ead9c796e298fa58efd0dbe11
--- /dev/null
+++ b/etc/kayobe/inventory/groups
@@ -0,0 +1,13 @@
+# Kayobe groups inventory file. This file should generally not be modified.
+# It declares the top-level groups and sub-groups.
+
+[seed]
+# Empty group to provide declaration of seed group.
+
+[controllers]
+# Empty group to provide declaration of controllers group.
+
+[docker:children]
+# Hosts in this group will have Docker installed.
+seed
+controllers
diff --git a/etc/kayobe/inventory/hosts.example b/etc/kayobe/inventory/hosts.example
new file mode 100644
index 0000000000000000000000000000000000000000..16b7dbf44562be5eee5b89a973b528e762f4c6b8
--- /dev/null
+++ b/etc/kayobe/inventory/hosts.example
@@ -0,0 +1,19 @@
+# Kayobe hosts inventory file. This file should be modified to define the hosts
+# and their top-level group membership.
+
+[config-mgmt]
+# This host acts as the configuration management control host. This must be
+# localhost.
+localhost ansible_connection=local
+
+[seed-hypervisor]
+# Add a seed hypervisor node here if required. This host will run a seed node
+# Virtual Machine.
+
+[seed]
+# Add a seed node here if required. This host will provide the Bifrost
+# undercloud.
+
+[controllers]
+# Add controller nodes here if required. These hosts will provide the
+# OpenStack overcloud.
diff --git a/etc/kayobe/kolla.yml b/etc/kayobe/kolla.yml
new file mode 100644
index 0000000000000000000000000000000000000000..60be86d73f641ffb752bc1f9095d4db6779960a8
--- /dev/null
+++ b/etc/kayobe/kolla.yml
@@ -0,0 +1,74 @@
+---
+# Kayobe Kolla configuration.
+
+###############################################################################
+# Kolla configuration.
+
+# Path to Kolla configuration directory.
+#kolla_config_path:
+
+# Kolla base container image distribution.
+#kolla_base_distro:
+
+# Kolla installation type: binary or source.
+#kolla_install_type:
+
+# Kolla OpenStack release version. This should be a Docker image tag.
+#kolla_openstack_release:
+
+# Whether TLS is enabled for the external API endpoints.
+#kolla_enable_tls_external:
+
+# Path to external API certificate.
+#kolla_external_fqdn_cert:
+
+# Whether debug logging is enabled.
+#kolla_openstack_logging_debug:
+
+###############################################################################
+# Kolla feature flag configuration.
+
+#kolla_enable_aodh:
+#kolla_enable_barbican:
+#kolla_enable_cadf_notifications:
+#kolla_enable_ceilometer:
+#kolla_enable_central_logging:
+#kolla_enable_ceph:
+#kolla_enable_ceph_rgw:
+#kolla_enable_cinder:
+#kolla_enable_cinder_backend_iscsi:
+#kolla_enable_cinder_backend_lvm:
+#kolla_enable_cloudkitty:
+#kolla_enable_congress:
+#kolla_enable_etcd:
+#kolla_enable_gnocchi:
+#kolla_enable_grafana:
+#kolla_enable_heat:
+#kolla_enable_horizon:
+#kolla_enable_influxdb:
+#kolla_enable_ironic:
+#kolla_enable_iscsid:
+#kolla_enable_kuryr:
+#kolla_enable_magnum:
+#kolla_enable_manila:
+#kolla_enable_mistral:
+#kolla_enable_mongodb:
+#kolla_enable_multipathd:
+#kolla_enable_murano:
+#kolla_enable_neutron_vpnaas:
+#kolla_enable_neutron_dvr:
+#kolla_enable_neutron_lbaas:
+#kolla_enable_neutron_qos:
+#kolla_enable_neutron_agent_ha:
+#kolla_enable_rally:
+#kolla_enable_sahara:
+#kolla_enable_senlin:
+#kolla_enable_swift:
+#kolla_enable_telegraf:
+#kolla_enable_tempest:
+#kolla_enable_vmtp:
+#kolla_enable_watcher:
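+
+# Example: enable bare metal provisioning. The quoted string form follows the
+# style used in Kolla's globals.yml and is illustrative only:
+#   kolla_enable_ironic: "yes"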
+
+###############################################################################
+# Dummy variable to allow Ansible to accept this file.
+workaround_ansible_issue_8743: yes
diff --git a/etc/kayobe/kolla/globals.yml b/etc/kayobe/kolla/globals.yml
new file mode 100644
index 0000000000000000000000000000000000000000..794c5fee5e25e4b8c2f7111aed551edac1296e06
--- /dev/null
+++ b/etc/kayobe/kolla/globals.yml
@@ -0,0 +1,2 @@
+---
+# Add extra Kolla global configuration here.
diff --git a/etc/kayobe/networks.yml b/etc/kayobe/networks.yml
new file mode 100644
index 0000000000000000000000000000000000000000..c9219b8d554d1a61df471524923d89369753d6ca
--- /dev/null
+++ b/etc/kayobe/networks.yml
@@ -0,0 +1,66 @@
+---
+# Kayobe network configuration.
+
+###############################################################################
+# Network roles.
+
+# Network role to network mappings.
+#provision_oc_net_name: 'provision_oc_net'
+#provision_wl_net_name: 'provision_wl_net'
+#internal_net_name: 'internal_net'
+#external_net_name: 'external_net'
+#storage_net_name: 'storage_net'
+#storage_mgmt_net_name: 'storage_mgmt_net'
+
+###############################################################################
+# Network definitions.
+
+# Overcloud provisioning network IP information.
+# provision_oc_net_cidr:
+# provision_oc_net_allocation_pool_start:
+# provision_oc_net_allocation_pool_end:
+# provision_oc_net_gateway:
+# provision_oc_net_vlan:
+
+# Workload provisioning network IP information.
+# provision_wl_net_cidr:
+# provision_wl_net_allocation_pool_start:
+# provision_wl_net_allocation_pool_end:
+# provision_wl_net_gateway:
+# provision_wl_net_vlan:
+
+# Internal network IP information.
+# internal_net_vip_address:
+# internal_net_fqdn:
+# internal_net_cidr:
+# internal_net_allocation_pool_start:
+# internal_net_allocation_pool_end:
+# internal_net_gateway:
+# internal_net_vlan:
+
+# External network IP information.
+# external_net_vip_address:
+# external_net_fqdn:
+# external_net_cidr:
+# external_net_allocation_pool_start:
+# external_net_allocation_pool_end:
+# external_net_gateway:
+# external_net_vlan:
+
+# Storage network IP information.
+# storage_net_cidr:
+# storage_net_allocation_pool_start:
+# storage_net_allocation_pool_end:
+# storage_net_gateway:
+# storage_net_vlan:
+
+# Storage management network IP information.
+# storage_mgmt_net_cidr:
+# storage_mgmt_net_allocation_pool_start:
+# storage_mgmt_net_allocation_pool_end:
+# storage_mgmt_net_gateway:
+# storage_mgmt_net_vlan:
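+
+# Example of the naming pattern, mirroring configure-kayobe.sh, which maps all
+# roles onto a single network (names and addresses are examples only):
+#   internal_net_name: 'example_net'
+#   example_net_cidr: 10.0.0.0/24
+#   example_net_gateway: 10.0.0.1
+#   example_net_vip_address: 10.0.0.2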
+
+###############################################################################
+# Dummy variable to allow Ansible to accept this file.
+workaround_ansible_issue_8743: yes
diff --git a/etc/kayobe/ntp.yml b/etc/kayobe/ntp.yml
new file mode 100644
index 0000000000000000000000000000000000000000..4c0f0b42f33e745868b221571b1e11a827f21161
--- /dev/null
+++ b/etc/kayobe/ntp.yml
@@ -0,0 +1,38 @@
+---
+# Kayobe NTP configuration.
+
+###############################################################################
+# Timezone.
+
+# Name of the local timezone.
+#timezone:
+
+###############################################################################
+# Network Time Protocol (NTP).
+
+# List of names of NTP servers.
+#ntp_config_server:
+
+# List of NTP restrictions to add to ntp.conf.
+#ntp_config_restrict:
+
+# List of addresses for NTP daemon to listen on.
+#ntp_config_listen:
+
+# Other NTP configuration options.
+#ntp_config_filegen:
+#ntp_config_statistics:
+#ntp_config_crypto:
+#ntp_config_includefile:
+#ntp_config_keys:
+#ntp_config_trustedkey:
+#ntp_config_requestkey:
+#ntp_config_controlkey:
+#ntp_config_broadcast:
+#ntp_config_broadcastclient:
+#ntp_config_multicastclient:
+#ntp_config_tinker_panic_enabled:
+
+###############################################################################
+# Dummy variable to allow Ansible to accept this file.
+workaround_ansible_issue_8743: yes
diff --git a/etc/kayobe/seed-vm.yml b/etc/kayobe/seed-vm.yml
new file mode 100644
index 0000000000000000000000000000000000000000..e9fe717735d3f4e5da38e4f1ad59fb92b9c17db0
--- /dev/null
+++ b/etc/kayobe/seed-vm.yml
@@ -0,0 +1,34 @@
+---
+###############################################################################
+# Seed node VM configuration.
+
+# Name of the seed VM.
+#seed_vm_name:
+
+# Memory in MB.
+#seed_vm_memory_mb:
+
+# Number of vCPUs.
+#seed_vm_vcpus:
+
+# List of volumes.
+#seed_vm_volumes:
+
+# List of network interfaces.
+#seed_vm_interfaces:
+
+# Name of the storage pool for the seed VM volumes.
+#seed_vm_pool:
+
+# Capacity of the seed VM root volume.
+#seed_vm_root_capacity:
+
+# Format of the seed VM root volume.
+#seed_vm_root_format:
+
+# Base image for the seed VM root volume.
+#seed_vm_root_image:
+
+###############################################################################
+# Dummy variable to allow Ansible to accept this file.
+workaround_ansible_issue_8743: yes
diff --git a/etc/kayobe/swift.yml b/etc/kayobe/swift.yml
new file mode 100644
index 0000000000000000000000000000000000000000..3af868cf9d620479b6b49916413d020e9ec4692c
--- /dev/null
+++ b/etc/kayobe/swift.yml
@@ -0,0 +1,20 @@
+---
+###############################################################################
+# OpenStack Swift configuration.
+
+# Base-2 logarithm of the number of partitions.
+# i.e. num_partitions=2^<swift_part_power>.
+#swift_part_power:
+
+# Object replication count.
+#swift_replication_count:
+
+# Minimum time in hours between moving a given partition.
+#swift_min_part_hours:
+
+# Number of Swift Zones.
+#swift_num_zones:
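+
+# Example: swift_part_power: 10 gives 2^10 = 1024 ring partitions.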
+
+###############################################################################
+# Dummy variable to allow Ansible to accept this file.
+workaround_ansible_issue_8743: yes
diff --git a/kayobe-playbook b/kayobe-playbook
new file mode 100755
index 0000000000000000000000000000000000000000..f19288c13d6c2a7728f70c439c46471465991f0e
--- /dev/null
+++ b/kayobe-playbook
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+KAYOBE_CONFIG_PATH=${KAYOBE_CONFIG_PATH:-/etc/kayobe}
+
+# Ansible fails silently if the inventory does not exist.
+test -e ${KAYOBE_CONFIG_PATH}/inventory
+
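+# Example usage (the playbook and host limit shown mirror the calls made by
+# deploy-overcloud.sh):
+#   ./kayobe-playbook ansible/ntp.yml -l controllers
+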
+# Execute a Kayobe playbook.
+exec ansible-playbook \
+  -i ${KAYOBE_CONFIG_PATH}/inventory \
+  -e @${KAYOBE_CONFIG_PATH}/bifrost.yml \
+  -e @${KAYOBE_CONFIG_PATH}/dns.yml \
+  -e @${KAYOBE_CONFIG_PATH}/globals.yml \
+  -e @${KAYOBE_CONFIG_PATH}/kolla.yml \
+  -e @${KAYOBE_CONFIG_PATH}/networks.yml \
+  -e @${KAYOBE_CONFIG_PATH}/network-allocation.yml \
+  -e @${KAYOBE_CONFIG_PATH}/ntp.yml \
+  -e @${KAYOBE_CONFIG_PATH}/seed-vm.yml \
+  -e @${KAYOBE_CONFIG_PATH}/swift.yml \
+  $@
diff --git a/provision-overcloud.sh b/provision-overcloud.sh
new file mode 100755
index 0000000000000000000000000000000000000000..3b2a695a9d89355f4df02eb84b1089336675429f
--- /dev/null
+++ b/provision-overcloud.sh
@@ -0,0 +1,45 @@
+#!/bin/bash
+
+set -e
+
+function run_kolla_ansible {
+    export KOLLA_CONFIG_PATH=${KOLLA_CONFIG_PATH:-/etc/kolla}
+    # Ansible fails silently if the inventory does not exist.
+    test -e ${KOLLA_CONFIG_PATH}/inventory/seed
+    KOLLA_VENV=$(pwd)/ansible/kolla-venv
+    source ${KOLLA_VENV}/bin/activate
+    kolla-ansible \
+        --configdir ${KOLLA_CONFIG_PATH} \
+        --passwords ${KOLLA_CONFIG_PATH}/passwords.yml \
+        -i ${KOLLA_CONFIG_PATH}/inventory/seed \
+        $@
+    deactivate
+}
+
+function configure_network {
+    echo "TODO: configure overcloud network"
+}
+
+function configure_bios_and_raid {
+    echo "TODO: configure overcloud BIOS and RAID"
+}
+
+function deploy_servers {
+    # Deploy servers with Bifrost
+    run_kolla_ansible deploy-servers
+}
+
+function provision_overcloud {
+    configure_network
+    configure_bios_and_raid
+    deploy_servers
+}
+
+###########################################################
+# Main
+
+function main {
+    provision_overcloud
+}
+
+main $@
diff --git a/provision-seed.sh b/provision-seed.sh
new file mode 100755
index 0000000000000000000000000000000000000000..d87672bfeaea4d2f784c4329cc2de2ecb4aae5ea
--- /dev/null
+++ b/provision-seed.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+set -e
+
+function run_playbook {
+    KAYOBE_CONFIG_PATH=${KAYOBE_CONFIG_PATH:-/etc/kayobe}
+    # Ansible fails silently if the inventory does not exist.
+    test -e ${KAYOBE_CONFIG_PATH}/inventory
+    ansible-playbook \
+        -i ${KAYOBE_CONFIG_PATH}/inventory \
+        -e @${KAYOBE_CONFIG_PATH}/dns.yml \
+        -e @${KAYOBE_CONFIG_PATH}/globals.yml \
+        -e @${KAYOBE_CONFIG_PATH}/kolla.yml \
+        -e @${KAYOBE_CONFIG_PATH}/networks.yml \
+        -e @${KAYOBE_CONFIG_PATH}/network-allocation.yml \
+        -e @${KAYOBE_CONFIG_PATH}/ntp.yml \
+        -e @${KAYOBE_CONFIG_PATH}/seed-vm.yml \
+        -e @${KAYOBE_CONFIG_PATH}/swift.yml \
+        $@
+}
+
+function provision_seed_vm {
+    run_playbook ansible/seed-vm.yml
+}
+
+###########################################################
+# Main
+
+function main {
+    provision_seed_vm
+}
+
+main $*