diff --git a/ansible/group_vars/all.yml b/ansible/group_vars/all.yml
index 92fb3015bbc01bdd61b7e1423b91fa0ea0a64eef..b1c28b93c36cdfb569a10786bacd32cf462b6c96 100644
--- a/ansible/group_vars/all.yml
+++ b/ansible/group_vars/all.yml
@@ -525,6 +525,12 @@ octavia_amp_flavor_id:
 # Ceph can be setup with a caching to improve performance. To use the cache you
 # must provide separate disks than those for the OSDs
 ceph_enable_cache: "no"
+
+# Ceph cannot determine the size of a cache pool automatically, so the absolute
+# size must be configured here; otherwise flush/evict will not work.
+ceph_target_max_bytes: ""
+ceph_target_max_objects: ""
+
 # Valid options are [ forward, none, writeback ]
 ceph_cache_mode: "writeback"
 
diff --git a/ansible/roles/ceph_pools.yml b/ansible/roles/ceph_pools.yml
index ae496e956918f72582833eb2321daf9d2d5b52cf..c9afb798970609a79e5c1a62538fe4ac001320d7 100644
--- a/ansible/roles/ceph_pools.yml
+++ b/ansible/roles/ceph_pools.yml
@@ -78,3 +78,23 @@
   failed_when: False
   run_once: True
   when: "{{ ceph_enable_cache | bool }}"
+
+- name: Setting cache target_max_bytes
+  command: docker exec ceph_mon ceph osd pool set {{ pool_name }}-cache target_max_bytes {{ ceph_target_max_bytes }}
+  delegate_to: "{{ groups['ceph-mon'][0] }}"
+  changed_when: False
+  failed_when: False
+  run_once: True
+  when:
+    - ceph_enable_cache | bool
+    - ceph_target_max_bytes != ''
+
+- name: Setting cache target_max_objects
+  command: docker exec ceph_mon ceph osd pool set {{ pool_name }}-cache target_max_objects {{ ceph_target_max_objects }}
+  delegate_to: "{{ groups['ceph-mon'][0] }}"
+  changed_when: False
+  failed_when: False
+  run_once: True
+  when:
+    - ceph_enable_cache | bool
+    - ceph_target_max_objects != ''
diff --git a/etc/kolla/globals.yml b/etc/kolla/globals.yml
index 7fec81ae94e8b4b007128b84a55eb47d85daa47f..dcd969c83f299068fd53347ac0131dbc58a7369c 100644
--- a/etc/kolla/globals.yml
+++ b/etc/kolla/globals.yml
@@ -201,6 +201,12 @@ kolla_internal_vip_address: "10.10.10.254"
 # Ceph can be setup with a caching to improve performance. To use the cache you
 # must provide separate disks than those for the OSDs
 #ceph_enable_cache: "no"
+
+# Ceph cannot determine the size of a cache pool automatically, so the absolute
+# size must be configured here; otherwise flush/evict will not work.
+#ceph_target_max_bytes: ""
+#ceph_target_max_objects: ""
+
 # Valid options are [ forward, none, writeback ]
 #ceph_cache_mode: "writeback"
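
A minimal usage sketch, assuming the cache tier is enabled and using purely
illustrative sizing (the 100 GiB byte limit, the 1M object limit, and the
"images" pool name are hypothetical, not taken from this change). Once either
variable is set in /etc/kolla/globals.yml, the new tasks run the corresponding
"ceph osd pool set" command against each <pool>-cache pool on the first
ceph-mon node.

    # /etc/kolla/globals.yml -- illustrative values, not defaults
    ceph_enable_cache: "yes"
    ceph_cache_mode: "writeback"
    ceph_target_max_bytes: "107374182400"   # ~100 GiB before flush/evict starts
    ceph_target_max_objects: "1000000"      # object-count trigger; either limit may be set

    # Roughly equivalent commands issued per cache pool (pool name hypothetical):
    #   docker exec ceph_mon ceph osd pool set images-cache target_max_bytes 107374182400
    #   docker exec ceph_mon ceph osd pool set images-cache target_max_objects 1000000

Leaving either variable as "" skips the corresponding task, so byte-based and
object-based limits can be applied independently.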