diff --git a/ansible/roles/ceph/meta/main.yml b/ansible/roles/ceph/meta/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..6b4fff8fef6f81d35b73a30c90de45639db41cc9
--- /dev/null
+++ b/ansible/roles/ceph/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+  - { role: common }
diff --git a/ansible/roles/ceph/tasks/bootstrap_osds.yml b/ansible/roles/ceph/tasks/bootstrap_osds.yml
index 39802d080d3c287222fe4f80e0037485868d8862..8a0158d7ded15b4500e4e7968ede777d732ea10c 100644
--- a/ansible/roles/ceph/tasks/bootstrap_osds.yml
+++ b/ansible/roles/ceph/tasks/bootstrap_osds.yml
@@ -1,15 +1,27 @@
 ---
 - name: Looking up disks to bootstrap for Ceph
-  find_disks:
-      partition_name: 'KOLLA_CEPH_OSD_BOOTSTRAP'
-  register: osds_bootstrap
-  when: inventory_hostname in groups['ceph-osd']
+  command: docker exec -t kolla_ansible /usr/bin/ansible localhost
+    -m find_disks
+    -a "partition_name='KOLLA_CEPH_OSD_BOOTSTRAP'"
+  register: osd_lookup
+  changed_when: "osd_lookup.stdout.find('localhost | SUCCESS => ') != -1 and (osd_lookup.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed"
+  failed_when: "'localhost | SUCCESS => ' not in osd_lookup.stdout"
 
-- name: Looking up disks to bootstrap for Ceph cache
-  find_disks:
-      partition_name: 'KOLLA_CEPH_OSD_CACHE_BOOTSTRAP'
-  register: osds_cache_bootstrap
-  when: inventory_hostname in groups['ceph-osd']
+- name: Reading bootstrap disk data from variable
+  set_fact:
+    osds_bootstrap: "{{ (osd_lookup.stdout.split('localhost | SUCCESS => ')[1]|from_json).disks|from_json }}"
+
+- name: Looking up disks to bootstrap for Ceph cache
+  command: docker exec -t kolla_ansible /usr/bin/ansible localhost
+    -m find_disks
+    -a "partition_name='KOLLA_CEPH_OSD_CACHE_BOOTSTRAP'"
+  register: osd_cache_lookup
+  changed_when: "osd_cache_lookup.stdout.find('localhost | SUCCESS => ') != -1 and (osd_cache_lookup.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed"
+  failed_when: "'localhost | SUCCESS => ' not in osd_cache_lookup.stdout"
+
+- name: Reading cache disk data from variable
+  set_fact:
+    osds_cache_bootstrap: "{{ (osd_cache_lookup.stdout.split('localhost | SUCCESS => ')[1]|from_json).disks|from_json }}"
 
 - name: Bootstrapping Ceph OSDs
   docker:
@@ -33,8 +45,7 @@
       KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
       OSD_DEV: "{{ item.1.device }}"
       OSD_INITIAL_WEIGHT: "{{ osd_initial_weight }}"
-  with_indexed_items: osds_bootstrap['disks']|default([])
-  when: inventory_hostname in groups['ceph-osd']
+  with_indexed_items: osds_bootstrap|default([])
 
 # https://github.com/ansible/ansible-modules-core/pull/1031
 - name: Waiting for bootstrap containers to exit
@@ -42,8 +53,7 @@
   register: bootstrap_result
   run_once: True
   failed_when: bootstrap_result.stdout != "0"
-  with_indexed_items: osds_bootstrap['disks']|default([])
-  when: inventory_hostname in groups['ceph-osd']
+  with_indexed_items: osds_bootstrap|default([])
 
 - name: Cleaning up bootstrap containers
   docker:
@@ -51,8 +61,7 @@
     name: "bootstrap_osd_{{ item.0 }}"
     image: "{{ ceph_osd_image_full }}"
     state: absent
-  with_indexed_items: osds_bootstrap['disks']|default([])
-  when: inventory_hostname in groups['ceph-osd']
+  with_indexed_items: osds_bootstrap|default([])
 
 - name: Bootstrapping Ceph Cache OSDs
   docker:
@@ -77,8 +86,7 @@
       CEPH_CACHE:
       OSD_DEV: "{{ item.1.device }}"
       OSD_INITIAL_WEIGHT: "{{ osd_initial_weight }}"
-  with_indexed_items: osds_cache_bootstrap['disks']|default([])
-  when: inventory_hostname in groups['ceph-osd']
+  with_indexed_items: osds_cache_bootstrap|default([])
 
 # https://github.com/ansible/ansible-modules-core/pull/1031
 - name: Waiting for bootstrap containers to exit
@@ -86,8 +94,7 @@
   register: bootstrap_result
   run_once: True
   failed_when: bootstrap_result.stdout != "0"
-  with_indexed_items: osds_cache_bootstrap['disks']|default([])
-  when: inventory_hostname in groups['ceph-osd']
+  with_indexed_items: osds_cache_bootstrap|default([])
 
 - name: Cleaning up bootstrap containers
   docker:
@@ -95,5 +102,4 @@
     name: "bootstrap_osd_cache_{{ item.0 }}"
     image: "{{ ceph_osd_image_full }}"
     state: absent
-  with_indexed_items: osds_cache_bootstrap['disks']|default([])
-  when: inventory_hostname in groups['ceph-osd']
+  with_indexed_items: osds_cache_bootstrap|default([])
diff --git a/ansible/roles/ceph/tasks/start_osds.yml b/ansible/roles/ceph/tasks/start_osds.yml
index 657f9483a9fd71bca2edeed277b7f2f8b0ad1d2c..ab7bd35dddb16df3c9503fe259a8bcbe523644a4 100644
--- a/ansible/roles/ceph/tasks/start_osds.yml
+++ b/ansible/roles/ceph/tasks/start_osds.yml
@@ -1,20 +1,27 @@
 ---
 - name: Looking up OSDs for Ceph
-  find_disks:
-      partition_name: 'KOLLA_CEPH_DATA'
-  register: osds
+  command: docker exec -t kolla_ansible /usr/bin/ansible localhost
+    -m find_disks
+    -a "partition_name='KOLLA_CEPH_DATA'"
+  register: osd_lookup
+  changed_when: "osd_lookup.stdout.find('localhost | SUCCESS => ') != -1 and (osd_lookup.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed"
+  failed_when: "'localhost | SUCCESS => ' not in osd_lookup.stdout"
+
+- name: Reading data from variable
+  set_fact:
+    osds: "{{ (osd_lookup.stdout.split('localhost | SUCCESS => ')[1]|from_json).disks|from_json }}"
 
 - name: Mounting Ceph OSD volumes
   mount:
     src: "UUID={{ item.fs_uuid }}"
     fstype: xfs
     state: mounted
-    name: "/var/lib/ceph/osd/{{ item.fs_uuid }}"
-  with_items: osds.disks
+    name: "/var/lib/ceph/osd/{{ item['fs_uuid'] }}"
+  with_items: osds
 
 - name: Gathering OSD IDs
-  command: 'cat /var/lib/ceph/osd/{{ item.fs_uuid }}/whoami'
-  with_items: osds.disks
+  command: "cat /var/lib/ceph/osd/{{ item['fs_uuid'] }}/whoami"
+  with_items: osds
   register: id
   changed_when: False
   failed_when: id.rc != 0
@@ -36,15 +43,16 @@
     name: "ceph_osd_{{ item.0.stdout }}"
     image: "{{ ceph_osd_image_full }}"
     volumes:
-      - "/var/lib/ceph/osd/{{ item.1.fs_uuid }}:/var/lib/ceph/osd/ceph-{{ item.0.stdout }}"
+      - "/var/lib/ceph/osd/{{ item.1['fs_uuid'] }}:/var/lib/ceph/osd/ceph-{{ item.0.stdout }}"
       - "{{ node_config_directory }}/ceph-osd/:{{ container_config_directory }}/:ro"
       - "/dev/:/dev/"
     env:
       KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
       OSD_ID: "{{ item.0.stdout }}"
-      OSD_DEV: "{{ item.1.device }}"
+      OSD_DEV: "{{ item.1['device'] }}"
   with_together:
     - id.results
-    - osds.disks
-  when: inventory_hostname in groups['ceph-osd']
-        and osds.disks
+    - osds
+  when:
+    - inventory_hostname in groups['ceph-osd']
+    - osds
diff --git a/ansible/roles/common/tasks/start.yml b/ansible/roles/common/tasks/start.yml
index 2484c896b0c77c6dc648e996bfd92b53b51506e6..68ba56be07d8f71859772d4fc612e6812ee09e43 100755
--- a/ansible/roles/common/tasks/start.yml
+++ b/ansible/roles/common/tasks/start.yml
@@ -60,4 +60,6 @@
       ANSIBLE_LIBRARY: "/usr/share/ansible"
     volumes:
       - /var/lib/kolla/dev/log:/dev/log
+      - /dev/:/dev/
+      - /run/:/run/
     command: "/bin/sleep infinity"
diff --git a/docker/kolla-ansible/Dockerfile.j2 b/docker/kolla-ansible/Dockerfile.j2
index bf46b8e919f859b034f5662ce174308ff9586370..47e66e227a9f3ea1b4e768c9bb6ffd7cda6b5efe 100644
--- a/docker/kolla-ansible/Dockerfile.j2
+++ b/docker/kolla-ansible/Dockerfile.j2
@@ -17,7 +17,9 @@ RUN pip install -U pip wheel
 
 {% endif %}
 
-RUN pip --no-cache-dir install shade
+RUN pip --no-cache-dir install \
+        shade \
+        pyudev
 
 RUN git clone --depth 1 -b v2.0.0-0.2.alpha2 https://github.com/ansible/ansible.git \
     && cd ansible \
@@ -28,7 +30,7 @@ RUN mkdir -p /etc/ansible /usr/share/ansible /home/ansible \
     && echo 'localhost ansible_connection=local' > /etc/ansible/hosts \
     && useradd --user-group ansible --groups kolla
 
-COPY kolla_keystone_service.py kolla_keystone_user.py /usr/share/ansible/
+COPY find_disks.py kolla_keystone_service.py kolla_keystone_user.py /usr/share/ansible/
 COPY ansible.cfg /home/ansible/.ansible.cfg
 
 {{ include_footer }}
diff --git a/ansible/library/find_disks.py b/docker/kolla-ansible/find_disks.py
similarity index 62%
rename from ansible/library/find_disks.py
rename to docker/kolla-ansible/find_disks.py
index d7f166fc90178152f091fca81a2cfdd93d4c88cb..7b9324b2cef2c958b1e5f186c3b7d4a22df4f314 100644
--- a/ansible/library/find_disks.py
+++ b/docker/kolla-ansible/find_disks.py
@@ -42,9 +42,8 @@ EXAMPLES = '''
       register: osds
 '''
 
-import sys
-import subprocess
-
+import json
+import pyudev
 
 def main():
     module = AnsibleModule(
@@ -52,33 +51,19 @@ def main():
             partition_name = dict(required=True, type='str')
         )
     )
-
     partition_name = module.params.get('partition_name')
 
     try:
-        # This should all really be done differently. Unfortunately there is no
-        # decent python library for dealing with disks like we need to here.
-        disks = subprocess.check_output("parted -l", shell=True).split('\n')
         ret = list()
-
-        for line in disks:
-            d = line.split(' ')
-            if d[0] == 'Disk' and d[1] != 'Flags:':
-                dev = d[1][:-1]
-
-            if line.find(partition_name) != -1:
-                # This process returns an error code when no results return
-                # We can ignore that, it is safe
-                p = subprocess.Popen("blkid " + dev + "*", shell=True, stdout=subprocess.PIPE)
-                blkid_out = p.communicate()[0]
-                # The dev doesn't need to have a uuid, will be '' otherwise
-                if ' UUID=' in blkid_out:
-                    fs_uuid = blkid_out.split(' UUID="')[1].split('"')[0]
-                else:
+        ct = pyudev.Context()
+        for dev in ct.list_devices(subsystem='block', DEVTYPE='partition'):
+            if dev.get('ID_PART_ENTRY_NAME') == partition_name:
+                fs_uuid = dev.get('ID_FS_UUID')
+                if not fs_uuid:
                     fs_uuid = ''
-                ret.append({'device': dev, 'fs_uuid': fs_uuid})
-
-        module.exit_json(disks=ret)
+                dev_parent = dev.find_parent('block').device_node
+                ret.append({'device': dev_parent, 'fs_uuid': fs_uuid})
+        module.exit_json(disks=json.dumps(ret))
     except Exception as e:
         module.exit_json(failed=True, msg=repr(e))