diff --git a/ansible/group_vars/all.yml b/ansible/group_vars/all.yml
index e7c467bc82ece378aac30e254c35a3fc3a1718d5..7c9307cfea131b6c3926b02b43b3a6159c6c9d17 100644
--- a/ansible/group_vars/all.yml
+++ b/ansible/group_vars/all.yml
@@ -362,6 +362,13 @@ mariadb_wsrep_port: "4567"
 mariadb_ist_port: "4568"
 mariadb_sst_port: "4444"
 mariadb_clustercheck_port: "4569"
+mariadb_monitor_user: "haproxy"
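+# Hosts whose mariadb_shard_id is unset or equal to
+# mariadb_default_database_shard_id form the default shard - the only
+# cluster exposed through the load balancer.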
+mariadb_default_database_shard_id: 0
+mariadb_default_database_shard_hosts: "{% set default_shard = [] %}{% for host in groups['mariadb'] %}{% if hostvars[host]['mariadb_shard_id'] is not defined or hostvars[host]['mariadb_shard_id'] == mariadb_default_database_shard_id %}{{ default_shard.append(host) }}{% endif %}{% endfor %}{{ default_shard }}"
+mariadb_loadbalancer: "haproxy"
 
 masakari_api_port: "15868"
 
diff --git a/ansible/roles/mariadb/defaults/main.yml b/ansible/roles/mariadb/defaults/main.yml
index 6d1bb04724fb98467738315d91ea6557861a9c30..84a2134dbb8e61ef4c5a1963c8f8eb574d5c62b1 100644
--- a/ansible/roles/mariadb/defaults/main.yml
+++ b/ansible/roles/mariadb/defaults/main.yml
@@ -4,7 +4,7 @@ project_name: "mariadb"
 mariadb_services:
   mariadb:
     container_name: mariadb
-    group: mariadb
+    group: "{{ mariadb_shard_group }}"
     enabled: true
     image: "{{ mariadb_image_full }}"
     volumes: "{{ mariadb_default_volumes + mariadb_extra_volumes }}"
@@ -37,13 +37,13 @@ mariadb_services:
         custom_member_list: "{{ external_haproxy_members.split(';') }}"
   mariadb-clustercheck:
     container_name: mariadb_clustercheck
-    group: mariadb
+    group: "{{ mariadb_shard_group }}"
     enabled: "{{ enable_mariadb_clustercheck | bool }}"
     image: "{{ mariadb_clustercheck_image_full }}"
     volumes: "{{ mariadb_clustercheck_default_volumes + mariadb_clustercheck_extra_volumes }}"
     dimensions: "{{ mariadb_clustercheck_dimensions }}"
     environment:
-      MYSQL_USERNAME: "haproxy"
+      MYSQL_USERNAME: "{{ mariadb_monitor_user }}"
       MYSQL_PASSWORD: ""
       MYSQL_HOST: "{{ api_interface_address }}"
       AVAILABLE_WHEN_DONOR: "1"
@@ -57,8 +57,8 @@ database_max_timeout: 120
 ####################
 # HAProxy
 ####################
-internal_haproxy_members: "{% for host in groups['mariadb'] %}server {{ hostvars[host]['ansible_hostname'] }} {{ 'api' | kolla_address(host) }}:{{ mariadb_port }} check {% if enable_mariadb_clustercheck | bool %}port {{ mariadb_clustercheck_port }} {% endif %} inter 2000 rise 2 fall 5{% if not loop.first %} backup{% endif %};{% endfor %}"
-external_haproxy_members: "{% for host in groups['mariadb'] %}server {{ host }} {{ host }}:{{ mariadb_port }} check {% if enable_mariadb_clustercheck | bool %}port {{ mariadb_clustercheck_port}} {% endif %} inter 2000 rise 2 fall 5{% if not loop.first %} backup{% endif %};{% endfor %}"
+internal_haproxy_members: "{% for host in mariadb_default_database_shard_hosts %} server {{ hostvars[host]['ansible_hostname'] }} {{ 'api' | kolla_address(host) }}:{{ mariadb_port }} check {% if enable_mariadb_clustercheck | bool %}port {{ mariadb_clustercheck_port }}{% endif %} inter 2000 rise 2 fall 5{% if not loop.first %} backup{% endif %};{% endfor %}"
+external_haproxy_members: "{% for host in mariadb_default_database_shard_hosts %} server {{ host }} {{ host }}:{{ mariadb_port }} check {% if enable_mariadb_clustercheck | bool %}port {{ mariadb_clustercheck_port }}{% endif %} inter 2000 rise 2 fall 5{% if not loop.first %} backup{% endif %};{% endfor %}"
 
 ####################
 # Docker
@@ -95,6 +95,7 @@ mariadb_clustercheck_extra_volumes: "{{ default_extra_volumes }}"
 # Vars used within recover_cluster.yml
 ########################################
 mariadb_service: "{{ mariadb_services['mariadb'] }}"
+mariadb_recover_tmp_file_path: "/tmp/kolla_mariadb_recover_inventory_name_{{ mariadb_shard_name }}"
 
 ###############
 # WSREP options
@@ -108,12 +109,24 @@ mariabackup_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ doc
 mariabackup_tag: "{{ openstack_tag }}"
 mariabackup_image_full: "{{ mariabackup_image }}:{{ mariabackup_tag }}"
 
-mariadb_backup_host: "{{ groups['mariadb'][0] }}"
+mariadb_backup_host: "{{ groups[mariadb_shard_group][0] }}"
 mariadb_backup_database_schema: "PERCONA_SCHEMA"
-mariadb_backup_database_user: "backup"
+mariadb_backup_database_user: "{% if mariadb_loadbalancer == 'haproxy' %}backup{% else %}backup_{{ mariadb_shard_name }}{% endif %}"
 mariadb_backup_type: "full"
+mariadb_backup: "{{ mariadb_loadbalancer != 'haproxy' or inventory_hostname in mariadb_default_database_shard_hosts }}"
 
 ####################
 # Clustercheck
 ####################
 enable_mariadb_clustercheck: "yes"
+
+####################
+# Sharding
+####################
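+# Each shard forms its own Galera cluster. Hosts are grouped by
+# mariadb_shard_group (e.g. mariadb_shard_0) according to their
+# mariadb_shard_id.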
+mariadb_shard_id: "{{ mariadb_default_database_shard_id }}"
+mariadb_shard_name: "shard_{{ mariadb_shard_id }}"
+mariadb_shard_group: "mariadb_{{ mariadb_shard_name }}"
+mariadb_shard_database_user: "{% if mariadb_loadbalancer == 'haproxy' %}{{ database_user }}{% else %}root_{{ mariadb_shard_name }}{% endif %}"
diff --git a/ansible/roles/mariadb/handlers/main.yml b/ansible/roles/mariadb/handlers/main.yml
index 9c3cb057de6e5c97728f9c2a9459dedf9b0875db..ccb10c311067175b6a49bb8869ba155354e8cb59 100644
--- a/ansible/roles/mariadb/handlers/main.yml
+++ b/ansible/roles/mariadb/handlers/main.yml
@@ -57,7 +57,7 @@
       login_port: "{{ mariadb_port }}"
       login_user: "{{ database_user }}"
       login_password: "{{ database_password }}"
-      name: "haproxy"
+      name: "{{ mariadb_monitor_user }}"
       password: ""
       host: "%"
       priv: "*.*:USAGE"
@@ -66,9 +66,9 @@
 - name: Restart MariaDB on existing cluster members
   include_tasks: 'restart_services.yml'
   when:
-    - groups.mariadb_port_alive_True is defined
-    - inventory_hostname in groups.mariadb_port_alive_True
-    - groups.mariadb_port_alive_True.index(inventory_hostname) % 4 == item
+    - groups[mariadb_shard_group + '_port_alive_True'] is defined
+    - inventory_hostname in groups[mariadb_shard_group + '_port_alive_True']
+    - groups[mariadb_shard_group + '_port_alive_True'].index(inventory_hostname) % 4 == item
     - kolla_action != "config"
   listen: restart mariadb
   loop:
@@ -81,8 +81,8 @@
   include_tasks: 'restart_services.yml'
   when:
     - bootstrap_host is not defined or bootstrap_host != inventory_hostname
-    - groups.mariadb_port_alive_False is defined
-    - inventory_hostname in groups.mariadb_port_alive_False
+    - groups[mariadb_shard_group + '_port_alive_False'] is defined
+    - inventory_hostname in groups[mariadb_shard_group + '_port_alive_False']
     - kolla_action != "config"
   listen: restart mariadb
 
diff --git a/ansible/roles/mariadb/tasks/backup.yml b/ansible/roles/mariadb/tasks/backup.yml
index 8923d91f59e9d307e2dc606dedd690f0f634503d..1bd0ca0e375dfef2fff40ccd6a15527fabe9e384 100644
--- a/ansible/roles/mariadb/tasks/backup.yml
+++ b/ansible/roles/mariadb/tasks/backup.yml
@@ -20,3 +20,4 @@
       - "kolla_logs:/var/log/kolla/"
   when:
     - inventory_hostname == mariadb_backup_host
+    - mariadb_backup | bool
diff --git a/ansible/roles/mariadb/tasks/bootstrap.yml b/ansible/roles/mariadb/tasks/bootstrap.yml
index b005088e2f3687c9659287462a757e8b78484c48..1ff1cc764bc9802f0a2685173dd20173320d2ea4 100644
--- a/ansible/roles/mariadb/tasks/bootstrap.yml
+++ b/ansible/roles/mariadb/tasks/bootstrap.yml
@@ -4,7 +4,7 @@
 - include_tasks: bootstrap_cluster.yml
   when:
     - not mariadb_cluster_exists
-    - inventory_hostname == groups['mariadb'][0]
+    - inventory_hostname == groups[mariadb_shard_group][0]
 
 - include_tasks: recover_cluster.yml
   when: mariadb_recover | default(False)
diff --git a/ansible/roles/mariadb/tasks/check.yml b/ansible/roles/mariadb/tasks/check.yml
index 69f5e032de7687b30cddedf56620c136e56ab6e0..212e2e9a10e1975c05d23f83fbca54258a3c50f9 100644
--- a/ansible/roles/mariadb/tasks/check.yml
+++ b/ansible/roles/mariadb/tasks/check.yml
@@ -1,9 +1,17 @@
 ---
-- name: Waiting for MariaDB service to be ready through VIP
+# Explicitly wait for the database to be accessible via the load balancer.
+# Sometimes it can reject connections even when all database services are up,
+# due to the health check polling in HAProxy.
+- name: Wait for MariaDB service to be ready through VIP
   become: true
-  command: "docker exec {{ mariadb_service.container_name }} mysql -h {{ database_address }} -P {{ database_port }} -u haproxy -e 'show databases;'"
+  command: >
+    docker exec {{ mariadb_service.container_name }}
+    mysql -h {{ database_address }} -P {{ database_port }}
+    -u {{ mariadb_shard_database_user }} -p{{ database_password }} -e 'show databases;'
   register: result
   until: result is success
   changed_when: False
   retries: 6
   delay: 10
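+  # Only the default shard is reachable through the VIP.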
+  when: mariadb_shard_id == mariadb_default_database_shard_id
diff --git a/ansible/roles/mariadb/tasks/deploy.yml b/ansible/roles/mariadb/tasks/deploy.yml
index 9e2d1d8db07b6c242a3805da9613312c13ca5dc8..348f61b84c08145aec5d66a1e99770208315bfd1 100644
--- a/ansible/roles/mariadb/tasks/deploy.yml
+++ b/ansible/roles/mariadb/tasks/deploy.yml
@@ -10,5 +10,4 @@
 
 - import_tasks: register.yml
 
-# Test haproxy user through VIP
 - import_tasks: check.yml
diff --git a/ansible/roles/mariadb/tasks/lookup_cluster.yml b/ansible/roles/mariadb/tasks/lookup_cluster.yml
index 00f71a00ebd0c2997f7176502de757a8c79075bd..aa355adcfdf433bc409f30918b819ab1d8baed80 100644
--- a/ansible/roles/mariadb/tasks/lookup_cluster.yml
+++ b/ansible/roles/mariadb/tasks/lookup_cluster.yml
@@ -9,12 +9,12 @@
 
 - name: Divide hosts by their MariaDB volume availability
   group_by:
-    key: mariadb_had_volume_{{ mariadb_volume is not changed }}
+    key: "{{ mariadb_shard_group }}_had_volume_{{ mariadb_volume is not changed }}"
   changed_when: false
 
 - name: Establish whether the cluster has already existed
   set_fact:
-    mariadb_cluster_exists: "{{ groups.mariadb_had_volume_True is defined }}"
+    mariadb_cluster_exists: "{{ groups[mariadb_shard_group + '_had_volume_True'] is defined }}"
 
 - block:
     - name: Check MariaDB service port liveness
@@ -29,7 +29,7 @@
 
     - name: Divide hosts by their MariaDB service port liveness
       group_by:
-        key: mariadb_port_alive_{{ check_mariadb_port_liveness is success }}
+        key: "{{ mariadb_shard_group }}_port_alive_{{ check_mariadb_port_liveness is success }}"
       changed_when: false
 
     - name: Fail on existing but stopped cluster
@@ -37,9 +37,9 @@
         msg: MariaDB cluster exists but is stopped. Please start it using kolla-ansible mariadb_recovery
       when:
         # NOTE(yoctozepto): we allow single-node cluster to start
-        - groups['mariadb'] | length > 1
+        - groups[mariadb_shard_group] | length > 1
         - mariadb_cluster_exists
-        - groups.mariadb_port_alive_True is not defined
+        - groups[mariadb_shard_group + '_port_alive_True'] is not defined
 
     - block:
         - name: Check MariaDB service WSREP sync status
@@ -60,19 +60,20 @@
           set_fact:
             mariadb_sync_status: "{{ check_mariadb_sync_status.stdout.split('\t')[1] }}"
       when:
-        - groups.mariadb_port_alive_True is defined
-        - inventory_hostname in groups.mariadb_port_alive_True
+        - groups[mariadb_shard_group + '_port_alive_True'] is defined
+        - inventory_hostname in groups[mariadb_shard_group + '_port_alive_True']
 
     - name: Divide hosts by their MariaDB service WSREP sync status
       group_by:
-        key: mariadb_sync_status_{{ mariadb_sync_status | default('NA') }}
+        key: "{{ mariadb_shard_group }}_sync_status_{{ mariadb_sync_status | default('NA') }}"
       changed_when: false
 
     - name: Fail when MariaDB services are not synced across the whole cluster
       fail:
         msg: MariaDB cluster is not synced. Please wait for WSREP sync before proceeding.
       when:
-        - groups.mariadb_port_alive_True is defined
-        - groups.mariadb_sync_status_Synced is not defined or
-          groups.mariadb_port_alive_True | sort != groups.mariadb_sync_status_Synced | sort
+        - groups[mariadb_shard_group + '_port_alive_True'] is defined
+        - groups[mariadb_shard_group + '_sync_status_Synced'] is not defined or
+          groups[mariadb_shard_group + '_port_alive_True'] | sort != groups[mariadb_shard_group + '_sync_status_Synced'] | sort
+
   when: not mariadb_recover | default(False)
diff --git a/ansible/roles/mariadb/tasks/main.yml b/ansible/roles/mariadb/tasks/main.yml
index bc5d1e62576375cb42a2a0d62540aa54d212db7c..b6af2212f0fed40218c72e693b3b664c33b192a6 100644
--- a/ansible/roles/mariadb/tasks/main.yml
+++ b/ansible/roles/mariadb/tasks/main.yml
@@ -1,2 +1,7 @@
 ---
+- name: Group MariaDB hosts based on shards
+  group_by:
+    key: "{{ mariadb_shard_group }}"
+  changed_when: false
+
 - include_tasks: "{{ kolla_action }}.yml"
diff --git a/ansible/roles/mariadb/tasks/recover_cluster.yml b/ansible/roles/mariadb/tasks/recover_cluster.yml
index c26c946c3cf86d8dbafa64bb1753a17444218cde..dd426f3bf460712c7910eaeaecf5dc42086288d1 100644
--- a/ansible/roles/mariadb/tasks/recover_cluster.yml
+++ b/ansible/roles/mariadb/tasks/recover_cluster.yml
@@ -4,21 +4,15 @@
     msg: "MariaDB cluster was not found. Is your inventory correct?"
   when: not mariadb_cluster_exists
 
-- name: Cleaning up temp file on mariadb hosts
-  file:
-    path: /tmp/kolla_mariadb_grastate.dat
-    state: absent
-  changed_when: false
-  check_mode: no
-
 - name: Cleaning up temp file on localhost
   file:
-    path: /tmp/kolla_mariadb_recover_inventory_name
+    path: "{{ item }}"
     state: absent
   delegate_to: localhost
   changed_when: false
   check_mode: no
   run_once: true
+  with_fileglob: "/tmp/kolla_mariadb_recover_inventory_name_*"
 
 - block:
     - name: Stop MariaDB containers
@@ -76,7 +70,7 @@
           if [[ ! -z {{ hostvars[inventory_hostname]['seqno'] }} && ! -z {{ hostvars[item]['seqno'] }} &&
           {{ hostvars[inventory_hostname]['seqno'] }} =~ ^-?[0-9]+$ && {{ hostvars[item]['seqno'] }} =~ ^-?[0-9]+$ &&
           {{ hostvars[inventory_hostname]['seqno'] }} -lt {{ hostvars[item]['seqno'] }} ]]; then echo {{ hostvars[item]['seqno'] }}; fi
-      with_items: "{{ groups['mariadb'] }}"
+      with_items: "{{ groups[mariadb_shard_group] }}"
       register: seqno_compare
       args:
         executable: /bin/bash
@@ -85,7 +79,7 @@
     - name: Writing hostname of host with the largest seqno to temp file
       copy:
         content: "{{ inventory_hostname }}"
-        dest: /tmp/kolla_mariadb_recover_inventory_name
+        dest: "{{ mariadb_recover_tmp_file_path }}"
         mode: 0644
       delegate_to: localhost
       changed_when: false
@@ -93,7 +87,7 @@
 
     - name: Registering mariadb_recover_inventory_name from temp file
       set_fact:
-        mariadb_recover_inventory_name: "{{ lookup('file', '/tmp/kolla_mariadb_recover_inventory_name') }}"
+        mariadb_recover_inventory_name: "{{ lookup('file', mariadb_recover_tmp_file_path) }}"
   when:
     - mariadb_recover_inventory_name is not defined
 
@@ -230,4 +224,4 @@
     - bootstrap_host is defined
     - bootstrap_host == inventory_hostname
 
-- import_tasks: wait_for_loadbalancer.yml
+- import_tasks: check.yml
diff --git a/ansible/roles/mariadb/tasks/register.yml b/ansible/roles/mariadb/tasks/register.yml
index ab60c16ccb8ddc8e3d7f1ce631cfa2a298959016..68d00bf6c576d9a62b4720557b98eb1b4cc8592d 100644
--- a/ansible/roles/mariadb/tasks/register.yml
+++ b/ansible/roles/mariadb/tasks/register.yml
@@ -1,52 +1,85 @@
 ---
-- import_tasks: wait_for_loadbalancer.yml
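+# Users are created on the first node of each shard, connecting directly to
+# that node rather than through the VIP, since only the default shard sits
+# behind the load balancer.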
+- name: Creating shard root mysql user
+  become: true
+  kolla_toolbox:
+    module_name: mysql_user
+    module_args:
+      login_host: "{{ api_interface_address }}"
+      login_port: "{{ mariadb_port }}"
+      login_user: "{{ database_user }}"
+      login_password: "{{ database_password }}"
+      name: "{{ mariadb_shard_database_user }}"
+      password: "{{ database_password }}"
+      host: "%"
+      priv: "*.*:ALL,GRANT"
+  when:
+    - inventory_hostname == groups[mariadb_shard_group][0]
+
+- name: Creating mysql monitor user
+  become: true
+  kolla_toolbox:
+    module_name: mysql_user
+    module_args:
+      login_host: "{{ api_interface_address }}"
+      login_port: "{{ mariadb_port }}"
+      login_user: "{{ database_user }}"
+      login_password: "{{ database_password }}"
+      name: "{{ mariadb_monitor_user }}"
+      password: ""
+      host: "%"
+      priv: "*.*:USAGE"
+  when:
+    - inventory_hostname == groups[mariadb_shard_group][0]
 
 - name: Creating the Mariabackup database
   become: true
   kolla_toolbox:
     module_name: mysql_db
     module_args:
-      login_host: "{{ database_address }}"
-      login_port: "{{ database_port }}"
-      login_user: "{{ database_user }}"
+      login_host: "{{ api_interface_address }}"
+      login_port: "{{ mariadb_port }}"
+      login_user: "{{ mariadb_shard_database_user }}"
       login_password: "{{ database_password }}"
       name: "{{ mariadb_backup_database_schema }}"
-  run_once: True
   when:
     - enable_mariabackup | bool
+    - inventory_hostname == mariadb_backup_host
 
 - name: Creating database backup user and setting permissions
   become: true
   kolla_toolbox:
     module_name: mysql_user
     module_args:
-      login_host: "{{ database_address }}"
-      login_port: "{{ database_port }}"
-      login_user: "{{ database_user }}"
+      login_host: "{{ api_interface_address }}"
+      login_port: "{{ mariadb_port }}"
+      login_user: "{{ mariadb_shard_database_user }}"
       login_password: "{{ database_password }}"
       name: "{{ mariadb_backup_database_user }}"
       password: "{{ mariadb_backup_database_password }}"
       host: "%"
       priv: "*.*:CREATE TABLESPACE,RELOAD,PROCESS,SUPER,LOCK TABLES,REPLICATION CLIENT"
       append_privs: True
-  run_once: True
   when:
     - enable_mariabackup | bool
+    - inventory_hostname == mariadb_backup_host
 
 - name: Granting permissions on Mariabackup database to backup user
   become: true
   kolla_toolbox:
     module_name: mysql_user
     module_args:
-      login_host: "{{ database_address }}"
-      login_port: "{{ database_port }}"
-      login_user: "{{ database_user }}"
+      login_host: "{{ api_interface_address }}"
+      login_port: "{{ mariadb_port }}"
+      login_user: "{{ mariadb_shard_database_user }}"
       login_password: "{{ database_password }}"
       name: "{{ mariadb_backup_database_user }}"
       password: "{{ mariadb_backup_database_password }}"
       host: "%"
       priv: "{{ mariadb_backup_database_schema }}.*:CREATE,INSERT,SELECT"
       append_privs: True
-  run_once: True
   when:
     - enable_mariabackup | bool
+    - inventory_hostname == mariadb_backup_host
diff --git a/ansible/roles/mariadb/tasks/restart_services.yml b/ansible/roles/mariadb/tasks/restart_services.yml
index 86768d309cef2ae6de0339b31c048557c59189d4..dff5cafb103cee9ebf127729ad85e711b916446f 100644
--- a/ansible/roles/mariadb/tasks/restart_services.yml
+++ b/ansible/roles/mariadb/tasks/restart_services.yml
@@ -42,5 +42,5 @@
     # NOTE(yoctozepto): we don't want to wait for new nodes to fully sync
     # with an existing cluster as this could take time
     - not mariadb_cluster_exists or
-      (groups.mariadb_port_alive_True is defined and
-      inventory_hostname in groups.mariadb_port_alive_True)
+      (groups[mariadb_shard_group + '_port_alive_True'] is defined and
+      inventory_hostname in groups[mariadb_shard_group + '_port_alive_True'])
diff --git a/ansible/roles/mariadb/tasks/wait_for_loadbalancer.yml b/ansible/roles/mariadb/tasks/wait_for_loadbalancer.yml
deleted file mode 100644
index da6bd55c2ed955b052ac513b19397d85c2074351..0000000000000000000000000000000000000000
--- a/ansible/roles/mariadb/tasks/wait_for_loadbalancer.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-# Explicitly wait for the database to be accessible via the load balancer.
-# Sometimes it can reject connections even when all database services are up,
-# due to the health check polling in HAProxy.
-- name: wait for MariaDB to be available via HAProxy
-  wait_for:
-    host: "{{ database_address }}"
-    port: "{{ database_port }}"
-    connect_timeout: 1
-    timeout: 60
-    search_regex: "MariaDB"
-  register: check_mariadb_port
-  until: check_mariadb_port is success
-  retries: 10
-  delay: 6
-  run_once: True
diff --git a/ansible/roles/mariadb/templates/galera.cnf.j2 b/ansible/roles/mariadb/templates/galera.cnf.j2
index 192a3b8ccea75ed569f33293c074f1307c32ed5b..023f824b40fdb8c195b75629a4139c7cd46d8b5d 100644
--- a/ansible/roles/mariadb/templates/galera.cnf.j2
+++ b/ansible/roles/mariadb/templates/galera.cnf.j2
@@ -26,7 +26,7 @@ character-set-server = utf8
 
 datadir=/var/lib/mysql/
 
-wsrep_cluster_address=gcomm://{% if (groups['mariadb'] | length) > 1 %}{% for host in groups['mariadb'] %}{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ mariadb_wsrep_port }}{% if not loop.last %},{% endif %}{% endfor %}{% endif %}
+wsrep_cluster_address=gcomm://{% if (groups[mariadb_shard_group] | length) > 1 %}{% for host in groups[mariadb_shard_group] %}{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ mariadb_wsrep_port }}{% if not loop.last %},{% endif %}{% endfor %}{% endif %}
 
 wsrep_provider_options=gmcast.listen_addr=tcp://{{ api_interface_address | put_address_in_context('url') }}:{{ mariadb_wsrep_port }};ist.recv_addr={{ api_interface_address | put_address_in_context('url') }}:{{ mariadb_ist_port }};{% for option in mariadb_wsrep_extra_provider_options %}{{ option }}{% if not loop.last %};{% endif %}{% endfor %}
 
diff --git a/releasenotes/notes/mariadb-role-install-several-clusters-at-once-110057a091600d2c.yaml b/releasenotes/notes/mariadb-role-install-several-clusters-at-once-110057a091600d2c.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9da4debc430eb31e752602c0e2bd8a607114af0e
--- /dev/null
+++ b/releasenotes/notes/mariadb-role-install-several-clusters-at-once-110057a091600d2c.yaml
@@ -0,0 +1,19 @@
+---
+features:
+  - |
+    The MariaDB role now supports deploying multiple database clusters.
+    Operators can install and maintain several clusters at once using
+    kolla-ansible. This is useful when deploying database clusters for
+    cells, or dedicated clusters for services that place a heavy load
+    on the database.
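+
+    For example, hosts can be placed in an additional shard by setting
+    ``mariadb_shard_id`` in the inventory (the host names below are purely
+    illustrative); hosts without it stay in the default shard, which is the
+    cluster exposed through the load balancer::
+
+      [mariadb]
+      db1
+      db2
+      db3 mariadb_shard_id=1
+      db4 mariadb_shard_id=1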