diff --git a/ansible/group_vars/all.yml b/ansible/group_vars/all.yml
index 30f4b31722f5b3d1cf251cf3ce9f0e4b140988e2..bfa4c20517eccbdad461dfd17d42d6c3294d2ab0 100644
--- a/ansible/group_vars/all.yml
+++ b/ansible/group_vars/all.yml
@@ -225,6 +225,15 @@ ceph_enable_cache: "no"
 # Valid options are [ forward, none, writeback ]
 ceph_cache_mode: "writeback"
 
+# Valid options are [ ext4, btrfs, xfs ]
+ceph_osd_filesystem: "xfs"
+
+# These are /etc/fstab options. Comma-separated, no spaces (see fstab(8))
+ceph_osd_mount_options: "defaults,noatime"
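+# Example override (illustrative values only, not defaults):
+#   ceph_osd_filesystem: "btrfs"
+#   ceph_osd_mount_options: "defaults,noatime,compress=lzo"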
+
 # A requirement for using the erasure-coded pools is you must setup a cache tier
 # Valid options are [ erasure, replicated ]
 ceph_pool_type: "replicated"
diff --git a/ansible/roles/ceph/tasks/bootstrap_osds.yml b/ansible/roles/ceph/tasks/bootstrap_osds.yml
index 38ad2de44bc0c5aea2174763ee8e9fede4f14a5c..16599170e6f1f3905cb44a35fd0f2f09f6eee2db 100644
--- a/ansible/roles/ceph/tasks/bootstrap_osds.yml
+++ b/ansible/roles/ceph/tasks/bootstrap_osds.yml
@@ -32,6 +32,8 @@
       KOLLA_BOOTSTRAP:
       KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
       OSD_DEV: "{{ item.1.device }}"
+      OSD_FILESYSTEM: "{{ ceph_osd_filesystem }}"
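+      # OSD_FILESYSTEM is read by extend_start.sh in the ceph-osd image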
       OSD_INITIAL_WEIGHT: "{{ osd_initial_weight }}"
       HOSTNAME: "{{ hostvars[inventory_hostname]['ansible_' + storage_interface]['ipv4']['address'] }}"
     image: "{{ ceph_osd_image_full }}"
@@ -55,6 +57,7 @@
       KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
       CEPH_CACHE:
       OSD_DEV: "{{ item.1.device }}"
+      OSD_FILESYSTEM: "{{ ceph_osd_filesystem }}"
       OSD_INITIAL_WEIGHT: "{{ osd_initial_weight }}"
       HOSTNAME: "{{ hostvars[inventory_hostname]['ansible_' + storage_interface]['ipv4']['address'] }}"
     image: "{{ ceph_osd_image_full }}"
diff --git a/ansible/roles/ceph/tasks/start_osds.yml b/ansible/roles/ceph/tasks/start_osds.yml
index 1a59cbcd622a9bdb06595b3920a68cc8fac25b62..3f2bdd4cdc3695d8ec287d5b8cf8918b938cec71 100644
--- a/ansible/roles/ceph/tasks/start_osds.yml
+++ b/ansible/roles/ceph/tasks/start_osds.yml
@@ -14,9 +14,11 @@
 - name: Mounting Ceph OSD volumes
   mount:
     src: "UUID={{ item.fs_uuid }}"
-    fstype: xfs
+    fstype: "{{ ceph_osd_filesystem }}"
     state: mounted
     name: "/var/lib/ceph/osd/{{ item['fs_uuid'] }}"
+    opts: "{{ ceph_osd_mount_options }}"
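+    # The mount module persists these opts to /etc/fstab (state: mounted)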
   with_items: osds
 
 - name: Gathering OSD IDs
diff --git a/docker/ceph/ceph-base/Dockerfile.j2 b/docker/ceph/ceph-base/Dockerfile.j2
index c51cad8196aea50a5f86f8ad3903bf49a68a87cd..4787448f860c9d7b754610d56673dd41fa262973 100644
--- a/docker/ceph/ceph-base/Dockerfile.j2
+++ b/docker/ceph/ceph-base/Dockerfile.j2
@@ -8,6 +8,7 @@ RUN yum -y install \
         ceph-radosgw \
         parted \
         hdparm \
+        btrfs-progs \
     && yum clean all
 
 {% elif base_distro in ['ubuntu', 'debian'] %}
@@ -17,6 +18,7 @@ RUN apt-get install -y --no-install-recommends \
         radosgw \
         parted \
         hdparm \
+        btrfs-tools \
     && apt-get clean
 
 {% endif %}
diff --git a/docker/ceph/ceph-osd/extend_start.sh b/docker/ceph/ceph-osd/extend_start.sh
index 7c1e479c05477b48dfa88d3bbd95f7535c617678..5db6f61c9358fdf4159f7f75560f0b024790058e 100644
--- a/docker/ceph/ceph-osd/extend_start.sh
+++ b/docker/ceph/ceph-osd/extend_start.sh
@@ -29,7 +29,16 @@ if [[ "${!KOLLA_BOOTSTRAP[@]}" ]]; then
     OSD_ID=$(ceph osd create)
     OSD_DIR="/var/lib/ceph/osd/ceph-${OSD_ID}"
     mkdir -p "${OSD_DIR}"
-    mkfs.xfs -f "${OSD_PARTITION}"
+
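+    # Create the chosen filesystem on the OSD partition; -f forces
+    # mkfs.xfs/mkfs.btrfs to overwrite any existing filesystem signature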
+    if [[ "${OSD_FILESYSTEM}" == "btrfs" ]]; then
+        mkfs.btrfs -f "${OSD_PARTITION}"
+    elif [[ "${OSD_FILESYSTEM}" == "ext4" ]]; then
+        mkfs.ext4 "${OSD_PARTITION}"
+    else
+        mkfs.xfs -f "${OSD_PARTITION}"
+    fi
     mount "${OSD_PARTITION}" "${OSD_DIR}"
 
    # This will throw an error about no key existing. That is normal. It then