diff --git a/ansible/group_vars/all.yml b/ansible/group_vars/all.yml
index df0a135168a65c18bae280e6f985f6f1e0d1138d..a0f9f65ac21c3f3279da8b8ee7cf65a5770bffdf 100644
--- a/ansible/group_vars/all.yml
+++ b/ansible/group_vars/all.yml
@@ -159,6 +159,33 @@ docker_common_options:
   restart_retries: "{{ docker_restart_policy_retry }}"
   graceful_timeout: "{{ docker_graceful_timeout }}"
   client_timeout: "{{ docker_client_timeout }}"
+  container_engine: "{{ kolla_container_engine }}"
+
+# Container engine specific volume paths
+docker_volumes_path: "{{ docker_runtime_directory or '/var/lib/docker' }}/volumes"
+podman_volumes_path: "{{ docker_runtime_directory or '/var/lib/containers' }}/storage/volumes"
+container_engine_volumes_path: "{{ docker_volumes_path if kolla_container_engine == 'docker' else podman_volumes_path }}"
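+# By default container_engine_volumes_path resolves to /var/lib/docker/volumes
+# (Docker) or /var/lib/containers/storage/volumes (Podman).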
+
+#####################
+# Volumes under /run
+#####################
+# Podman has a problem with mounting the whole /run directory,
+# described here: https://github.com/containers/podman/issues/16305
+run_default_volumes_podman:
+  - "/run/netns:/run/netns:shared"
+  - "/run/lock/nova:/run/lock/nova:shared"
+  - "/run/libvirt:/run/libvirt:shared"
+  - "/run/nova:/run/nova:shared"
+  - "/run/openvswitch:/run/openvswitch:shared"
+
+run_default_volumes_docker: []
+
+run_default_subdirectories:
+  - "/run/netns"
+  - "/run/lock/nova"
+  - "/run/libvirt"
+  - "/run/nova"
+  - "/run/openvswitch"
 
 ####################
 # Dimensions options
@@ -167,11 +194,20 @@ docker_common_options:
 # NOTE(mnasiadka): Lower 1073741816 nofile limit on EL9 (RHEL9/CentOS Stream 9/Rocky Linux 9)
 #                  fixes at least rabbitmq and mariadb
 default_container_dimensions: "{{ default_container_dimensions_el9 if ansible_facts.os_family == 'RedHat' else '{}' }}"
-default_container_dimensions_el9:
+default_container_dimensions_el9: "{{ default_docker_dimensions_el9 if kolla_container_engine == 'docker' else default_podman_dimensions_el9 }}"
+default_docker_dimensions_el9:
   ulimits:
     nofile:
       soft: 1048576
       hard: 1048576
+default_podman_dimensions_el9:
+  ulimits:
+    RLIMIT_NOFILE:
+      soft: 1048576
+      hard: 1048576
+    RLIMIT_NPROC:
+      soft: 1048576
+      hard: 1048576
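+# NOTE: the RLIMIT_* names above are also hardcoded in
+# kolla_podman_worker.parse_dimensions, which filters them out of
+# user-supplied ulimits.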
 
 #####################
 # Healthcheck options
diff --git a/ansible/library/kolla_container_facts.py b/ansible/library/kolla_container_facts.py
index d9626097c2a9e5dcc70306a07b23b8f3e3034d23..3f75a44789992d047857143f2ba658e022fab027 100644
--- a/ansible/library/kolla_container_facts.py
+++ b/ansible/library/kolla_container_facts.py
@@ -13,10 +13,9 @@
 # limitations under the License.
 
 
-import docker
-
 from ansible.module_utils.basic import AnsibleModule
 
+
 DOCUMENTATION = '''
 ---
 module: kolla_container_facts
@@ -41,6 +40,11 @@ options:
       - Name or names of the containers
     required: False
     type: str or list
+  container_engine:
+    description:
+      - Name of container engine to use
+    required: True
+    type: str
 author: Jeffrey Zhang
 '''
 
@@ -49,6 +53,7 @@ EXAMPLES = '''
   tasks:
     - name: Gather docker facts
       kolla_container_facts:
+        container_engine: docker
 
     - name: Gather glance container facts
       kolla_container_facts:
@@ -56,24 +61,18 @@ EXAMPLES = '''
         name:
           - glance_api
           - glance_registry
+        container_engine: podman
 '''
 
 
 def get_docker_client():
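+    # NOTE: docker is imported lazily so that this module can run on hosts
+    # where only the podman bindings are installed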
+    import docker
     return docker.APIClient
 
 
-def main():
-    argument_spec = dict(
-        name=dict(required=False, type='list', default=[]),
-        api_version=dict(required=False, type='str', default='auto'),
-        container_engine=dict(required=True, type='str')
-    )
-
-    module = AnsibleModule(argument_spec=argument_spec)
-
-    results = dict(changed=False, _containers=[])
+def use_docker(module, results):
     client = get_docker_client()(version=module.params.get('api_version'))
+
     containers = client.containers()
     names = module.params.get('name')
     if names and not isinstance(names, list):
@@ -86,6 +85,46 @@ def main():
                 continue
             results['_containers'].append(container)
             results[container_name] = container
+
+
+def use_podman(module, results):
+    import podman.errors as pe
+    from podman import PodmanClient
+
+    client = PodmanClient(base_url="http+unix:/run/podman/podman.sock")
+
+    try:
+        containers = client.containers.list(all=True, ignore_removed=True)
+    except pe.APIError as e:
+        module.fail_json(failed=True, msg=f"Internal error: {e.explanation}")
+    names = module.params.get('name')
+    if names and not isinstance(names, list):
+        names = [names]
+
+    for container in containers:
+        container.reload()
+        container_name = container.attrs['Name']
+        if names and container_name not in names:
+            continue
+        results['_containers'].append(container.attrs)
+        results[container_name] = container.attrs
+
+
+def main():
+    argument_spec = dict(
+        name=dict(required=False, type='list', default=[]),
+        api_version=dict(required=False, type='str', default='auto'),
+        container_engine=dict(required=True, type='str')
+    )
+
+    module = AnsibleModule(argument_spec=argument_spec)
+
+    results = dict(changed=False, _containers=[])
+    if module.params['container_engine'] == 'podman':
+        use_podman(module, results)
+    else:
+        use_docker(module, results)
+
     module.exit_json(**results)
 
 
diff --git a/ansible/library/kolla_container_volume_facts.py b/ansible/library/kolla_container_volume_facts.py
index b1f471d229be8bd0948c5294e6986428dc9895cb..a5aba0c9e1a85c942a9de6c461dfa0b255ba60fb 100644
--- a/ansible/library/kolla_container_volume_facts.py
+++ b/ansible/library/kolla_container_volume_facts.py
@@ -13,16 +13,14 @@
 # limitations under the License.
 
 
-import docker
-
 from ansible.module_utils.basic import AnsibleModule
 
 DOCUMENTATION = '''
 ---
 module: kolla_container_volume_facts
-short_description: Module for collecting Docker container volume facts
+short_description: Module for collecting container volume facts
 description:
-  - A module targeted at collecting Docker container volume facts. It is used
+  - A module targeted at collecting container volume facts. It is used
     for detecting whether the container volume exists on a host.
 options:
   container_engine:
@@ -60,9 +58,25 @@ EXAMPLES = '''
 
 
 def get_docker_client():
+    import docker
     return docker.APIClient
 
 
+def get_docker_volumes(api_version):
+    client = get_docker_client()(version=api_version)
+    return client.volumes()['Volumes']
+
+
+def get_podman_volumes():
+    from podman import PodmanClient
+
+    client = PodmanClient(base_url="http+unix:/run/podman/podman.sock")
+    volumes = []
+    for volume in client.volumes.list():
+        volumes.append(volume.attrs)
+    return volumes
+
+
 def main():
     argument_spec = dict(
         name=dict(required=False, type='list', default=[]),
@@ -73,12 +87,15 @@ def main():
     module = AnsibleModule(argument_spec=argument_spec)
 
     results = dict(changed=False, _volumes=[])
-    client = get_docker_client()(version=module.params.get('api_version'))
-    volumes = client.volumes()
+    if module.params.get('container_engine') == 'docker':
+        volumes = get_docker_volumes(module.params.get('api_version'))
+    else:
+        volumes = get_podman_volumes()
+
     names = module.params.get('name')
     if names and not isinstance(names, list):
         names = [names]
-    for volume in volumes['Volumes']:
+    for volume in volumes:
         volume_name = volume['Name']
         if names and volume_name not in names:
             continue
diff --git a/ansible/library/kolla_docker.py b/ansible/library/kolla_docker.py
index 93788e35f9d02d3804eb86c6d871f11ef0def6ce..c19b7eb4f697c0d7eabfbe5f4fa4202583ac4306 100644
--- a/ansible/library/kolla_docker.py
+++ b/ansible/library/kolla_docker.py
@@ -16,22 +16,25 @@
 # a hacky way to seed most usages of kolla_docker in kolla-ansible ansible
 # playbooks - caution has to be exerted when setting "common_options"
 
-import traceback
+# FIXME(yoctozepto): restart_policy is *not* checked in the container
 
 from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.kolla_docker_worker import DockerWorker
+import traceback
+
+from ansible.module_utils.kolla_container_worker import ContainerWorker
+
 
 DOCUMENTATION = '''
 ---
 module: kolla_docker
-short_description: Module for controlling Docker
+short_description: Module for controlling containers
 description:
-     - A module targeting at controlling Docker as used by Kolla.
+     - A module targeted at controlling the container engine as used by Kolla.
 options:
   common_options:
     description:
       - A dict containing common params such as login info
-    required: False
+    required: True
     type: dict
     default: dict()
   action:
@@ -397,17 +400,23 @@ def generate_module():
 def main():
     module = generate_module()
 
-    dw = None
+    cw: ContainerWorker = None
     try:
-        dw = DockerWorker(module)
+        if module.params.get('container_engine') == 'docker':
+            from ansible.module_utils.kolla_docker_worker import DockerWorker
+            cw = DockerWorker(module)
+        else:
+            from ansible.module_utils.kolla_podman_worker import PodmanWorker
+            cw = PodmanWorker(module)
+
         # TODO(inc0): We keep it bool to have ansible deal with consistent
         # types. If we ever add method that will have to return some
         # meaningful data, we need to refactor all methods to return dicts.
-        result = bool(getattr(dw, module.params.get('action'))())
-        module.exit_json(changed=dw.changed, result=result, **dw.result)
+        result = bool(getattr(cw, module.params.get('action'))())
+        module.exit_json(changed=cw.changed, result=result, **cw.result)
     except Exception:
         module.fail_json(changed=True, msg=repr(traceback.format_exc()),
-                         **getattr(dw, 'result', {}))
+                         **getattr(cw, 'result', {}))
 
 
 if __name__ == '__main__':
diff --git a/ansible/library/kolla_toolbox.py b/ansible/library/kolla_toolbox.py
index 388d678add727ea7d1c7070036803affe4b2d6fc..acfc8a15aed1117740dc2b023cc973ddaa9d92f5 100644
--- a/ansible/library/kolla_toolbox.py
+++ b/ansible/library/kolla_toolbox.py
@@ -13,7 +13,6 @@
 # limitations under the License.
 
 from distutils.version import StrictVersion
-import docker
 import json
 import re
 
@@ -21,6 +20,7 @@ from ansible.module_utils.ansible_release import __version__ as ansible_version
 from ansible.module_utils.basic import AnsibleModule
 
 from ast import literal_eval
+from shlex import split
 
 DOCUMENTATION = '''
 ---
@@ -120,10 +120,19 @@ def gen_commandline(params):
         if StrictVersion(ansible_version) < StrictVersion('2.11.0'):
             module_args = params.get('module_args')
         else:
-            module_args = literal_eval(params.get('module_args'))
+            try:
+                module_args = literal_eval(params.get('module_args'))
+            except SyntaxError:
+                if not isinstance(params.get('module_args'), str):
+                    raise
+
+                # account for string arguments
+                module_args = split(params.get('module_args'))
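+                # e.g. "name=httpd state=started" is split into
+                # ['name=httpd', 'state=started'] and re-joined below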
         if isinstance(module_args, dict):
             module_args = ' '.join("{}='{}'".format(key, value)
                                    for key, value in module_args.items())
+        if isinstance(module_args, list):
+            module_args = ' '.join(module_args)
         command.extend(['-a', module_args])
     if params.get('module_extra_vars'):
         extra_vars = params.get('module_extra_vars')
@@ -134,6 +143,7 @@ def gen_commandline(params):
 
 
 def get_docker_client():
+    import docker
     return docker.APIClient
 
 
@@ -142,17 +152,7 @@ def docker_supports_environment_in_exec(client):
     return docker_version >= StrictVersion('1.25')
 
 
-def main():
-    specs = dict(
-        container_engine=dict(required=True, type='str'),
-        module_name=dict(required=True, type='str'),
-        module_args=dict(type='str'),
-        module_extra_vars=dict(type='json'),
-        api_version=dict(required=False, type='str', default='auto'),
-        timeout=dict(required=False, type='int', default=180),
-        user=dict(required=False, type='str'),
-    )
-    module = AnsibleModule(argument_spec=specs, bypass_checks=True)
+def use_docker(module):
     client = get_docker_client()(
         version=module.params.get('api_version'),
         timeout=module.params.get('timeout'))
@@ -240,7 +240,83 @@ def main():
                 # No way to know whether changed - assume yes.
                 ret['changed'] = True
 
-    module.exit_json(**ret)
+    return ret
+
+
+def get_kolla_toolbox():
+    from podman import PodmanClient
+
+    with PodmanClient(base_url="http+unix:/run/podman/podman.sock") as client:
+        for cont in client.containers.list(all=True):
+            cont.reload()
+            if cont.name == 'kolla_toolbox' and cont.status == 'running':
+                return cont
+
+
+def use_podman(module):
+    from podman.errors.exceptions import APIError
+
+    try:
+        kolla_toolbox = get_kolla_toolbox()
+        if not kolla_toolbox:
+            module.fail_json(msg='kolla_toolbox container is not running.')
+
+        kwargs = {}
+        if module.params.get('user'):
+            kwargs['user'] = module.params['user']
+        environment = {"ANSIBLE_STDOUT_CALLBACK": "json",
+                       "ANSIBLE_LOAD_CALLBACK_PLUGINS": "True"}
+        command_line = gen_commandline(module.params)
+
+        _, raw_output = kolla_toolbox.exec_run(
+            command_line,
+            environment=environment,
+            tty=True,
+            **kwargs
+        )
+    except APIError as e:
+        module.fail_json(msg=f'Encountered Podman API error: {e.explanation}')
+
+    try:
+        json_output = raw_output.decode('utf-8')
+        output = json.loads(json_output)
+    except Exception:
+        module.fail_json(
+            msg='Cannot parse the inner module output: %s' % raw_output)
+
+    try:
+        ret = output['plays'][0]['tasks'][0]['hosts']['localhost']
+    except (KeyError, IndexError):
+        module.fail_json(
+            msg='Ansible JSON output has unexpected format: %s' % output)
+
+    # Remove Ansible's internal variables from returned fields.
+    ret.pop('_ansible_no_log', None)
+
+    return ret
+
+
+def main():
+    specs = dict(
+        container_engine=dict(required=True, type='str'),
+        module_name=dict(required=True, type='str'),
+        module_args=dict(type='str'),
+        module_extra_vars=dict(type='json'),
+        api_version=dict(required=False, type='str', default='auto'),
+        timeout=dict(required=False, type='int', default=180),
+        user=dict(required=False, type='str'),
+    )
+    module = AnsibleModule(argument_spec=specs, bypass_checks=True)
+
+    container_engine = module.params.get('container_engine').lower()
+    if container_engine == 'docker':
+        result = use_docker(module)
+    elif container_engine == 'podman':
+        result = use_podman(module)
+    else:
+        module.fail_json(msg='Missing or invalid container engine.')
+
+    module.exit_json(**result)
 
 
 if __name__ == "__main__":
diff --git a/ansible/module_utils/kolla_docker_worker.py b/ansible/module_utils/kolla_docker_worker.py
index 2741b006d4c9f24e3dd09b1a962bf1e683aff78e..d3224f160b33361f5bcd0b161cac564c7d51013d 100644
--- a/ansible/module_utils/kolla_docker_worker.py
+++ b/ansible/module_utils/kolla_docker_worker.py
@@ -12,8 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# FIXME(yoctozepto): restart_policy is *not* checked in the container
-
 import docker
 import json
 import os
diff --git a/ansible/module_utils/kolla_podman_worker.py b/ansible/module_utils/kolla_podman_worker.py
new file mode 100644
index 0000000000000000000000000000000000000000..c0bf7518119e67680961da528a7751231abfb711
--- /dev/null
+++ b/ansible/module_utils/kolla_podman_worker.py
@@ -0,0 +1,674 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from podman.errors import APIError
+from podman import PodmanClient
+
+import shlex
+
+from ansible.module_utils.kolla_container_worker import COMPARE_CONFIG_CMD
+from ansible.module_utils.kolla_container_worker import ContainerWorker
+
+uri = "http+unix:/run/podman/podman.sock"
+
+CONTAINER_PARAMS = [
+    'name',             # string
+    'cap_add',          # list
+    'cgroupns',         # str, choices=['private', 'host']
+    'command',          # array of strings (a plain string in docker)
+
+    # this part is hidden inside dimensions
+    'cpu_period',       # int
+    'cpu_quota',        # int
+    'cpuset_cpus',      # str
+    'cpu_shares',       # int
+    'cpuset_mems',      # str
+    'kernel_memory',    # int or string
+    'mem_limit',        # (Union[int, str])
+    'mem_reservation',  # (Union[int, str]): Memory soft limit.
+    'memswap_limit',    # (Union[int, str]): Maximum amount of memory
+                        # + swap a container is allowed to consume.
+    'ulimits',          # List[Ulimit]
+    'blkio_weight',     # int between 10 and 1000
+
+
+    'detach',           # bool
+    'entrypoint',       # string
+    'environment',      # dict (same as docker's environment dictionary)
+    'healthcheck',      # same schema as docker's healthcheck
+    'image',            # string
+    'ipc_mode',         # string, the only supported option is 'host'
+
+    'labels',           # dict
+    'netns',            # dict # TODO(i.halomi): not sure how it works
+    'network_options',  # string: none, bridge, host, container:id;
+                        # missing in docker but needs to be 'host'
+    'pid_mode',         # string: 'host', 'private' or ''
+    'privileged',       # bool
+    'restart_policy',   # set to none, handled by systemd
+    'remove',           # bool
+    'restart_tries',    # int, not used - restarts are handled by systemd
+    'stop_timeout',     # int
+    'tty',              # bool
+    # NOTE: volumes are not passed directly; they are converted to mounts
+    'volumes',          # array of dict
+    'volumes_from',     # array of strings
+]
+
+
+class PodmanWorker(ContainerWorker):
+
+    def __init__(self, module) -> None:
+        super().__init__(module)
+
+        self.pc = PodmanClient(base_url=uri)
+
+    def prepare_container_args(self):
+        args = dict(
+            network_mode='host'
+        )
+
+        command = self.params.pop('command', '')
+        if command:
+            self.params['command'] = shlex.split(command)
+
+        # we have to transform volumes into mounts because the podman-py
+        # volume handling is broken
+        mounts = []
+        filtered_volumes = {}
+        volumes = self.params.get('volumes', [])
+        if volumes:
+            self.parse_volumes(volumes, mounts, filtered_volumes)
+            # we can delete original volumes so it won't raise error later
+            self.params.pop('volumes', None)
+
+        args['mounts'] = mounts
+        args['volumes'] = filtered_volumes
+
+        # environment values that are not strings have to be converted
+        environment = self.params.get('environment')
+        if environment:
+            for key, value in environment.items():
+                environment[key] = str(value)
+
+        healthcheck = self.params.get('healthcheck')
+        if healthcheck:
+            healthcheck = self.parse_healthcheck(healthcheck)
+            self.params.pop('healthcheck', None)
+            args.update(healthcheck)
+
+        # getting dimensions into separate parameters
+        dimensions = self.params.get('dimensions')
+        if dimensions:
+            dimensions = self.parse_dimensions(dimensions)
+            args.update(dimensions)
+
+        # NOTE(m.hiner): currently unsupported by Podman API
+        # args['tmpfs'] = self.generate_tmpfs()
+        self.params.pop('tmpfs', None)
+
+        # NOTE(m.hiner): in case containers are not privileged,
+        # they need this capability
+        if not self.params.get('privileged', False):
+            args['cap_add'] = self.params.pop('cap_add', []) + ['AUDIT_WRITE']
+
+        # this could be done inline, but originally there were about
+        # six keys to remap, hence the mapping approach
+        convert_keys = dict(
+            graceful_timeout='stop_timeout',
+            cgroupns_mode='cgroupns'
+        )
+
+        # remap differing args
+        for key_orig, key_new in convert_keys.items():
+            if key_orig in self.params:
+                value = self.params.get(key_orig, None)
+
+                if value is not None:
+                    args[key_new] = value
+
+        # record remaining args
+        for key, value in self.params.items():
+            if key in CONTAINER_PARAMS and value is not None:
+                args[key] = value
+
+        args.pop('restart_policy', None)    # handled by systemd
+
+        return args
+
+    def parse_volumes(self, volumes, mounts, filtered_volumes):
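+        # e.g. '/dev:/dev' becomes a bind mount, 'devpts:/dev/pts' becomes a
+        # devpts mount and 'kolla_logs:/var/log/kolla/' stays a named volume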
+        # we can ignore empty strings
+        volumes = [item for item in volumes if item.strip()]
+
+        for item in volumes:
+            # if it starts with / it is a bind mount, not a volume
+            if item[0] == '/':
+                mode = None
+                try:
+                    if item.count(':') == 2:
+                        src, dest, mode = item.split(':')
+                    else:
+                        src, dest = item.split(':')
+                except ValueError:
+                    self.module.fail_json(
+                        msg="Wrong format of volume: {}".format(item),
+                        failed=True
+                    )
+
+                mount_item = dict(
+                    source=src,
+                    target=dest,
+                    type='bind',
+                    propagation='rprivate'
+                )
+                if mode == 'ro':
+                    mount_item['read_only'] = True
+                if mode == 'shared':
+                    mount_item['propagation'] = 'shared'
+                mounts.append(mount_item)
+            else:
+                try:
+                    src, dest = item.split(':')
+                except ValueError:
+                    self.module.fail_json(
+                        msg="Wrong format of volume: {}".format(item),
+                        failed=True
+                    )
+                if src == 'devpts':
+                    mount_item = dict(
+                        target=dest,
+                        type='devpts'
+                    )
+                    mounts.append(mount_item)
+                else:
+                    filtered_volumes[src] = dict(
+                        bind=dest,
+                        mode='rw'
+                    )
+
+    def parse_dimensions(self, dimensions):
+        dimensions = dimensions.copy()
+
+        supported = {'cpu_period', 'cpu_quota', 'cpu_shares',
+                     'cpuset_cpus', 'cpuset_mems', 'mem_limit',
+                     'mem_reservation', 'memswap_limit',
+                     'kernel_memory', 'blkio_weight', 'ulimits'}
+        unsupported = set(dimensions) - supported
+        if unsupported:
+            self.module.exit_json(failed=True,
+                                  msg=repr("Unsupported dimensions"),
+                                  unsupported_dimensions=unsupported)
+
+        ulimits = dimensions.get('ulimits', {})
+        if ulimits:
+            # NOTE(m.hiner): default ulimits have to be filtered out because
+            # Podman would treat them as new ulimits and break the container
+            # as a result. Names are a copy of
+            # default_podman_dimensions_el9 in /ansible/group_vars/all.yml
+            for name in ['RLIMIT_NOFILE', 'RLIMIT_NPROC']:
+                ulimits.pop(name, None)
+
+            dimensions['ulimits'] = self.build_ulimits(ulimits)
+
+        return dimensions
+
+    def parse_healthcheck(self, healthcheck):
+        hc = super().parse_healthcheck(healthcheck)
+
+        # rename key to right format
+        if hc:
+            sp = hc['healthcheck'].pop('start_period', None)
+            if sp:
+                hc['healthcheck']['StartPeriod'] = sp
+
+        return hc
+
+    def prepare_image_args(self):
+        image, tag = self.parse_image()
+
+        args = dict(
+            repository=image,
+            tag=tag,
+            tls_verify=self.params.get('tls_verify', False),
+            stream=False
+        )
+
+        if self.params.get('auth_username', False):
+            args['auth_config'] = dict(
+                username=self.params.get('auth_username'),
+                password=self.params.get('auth_password', "")
+            )
+
+        if '/' not in image and self.params.get('auth_registry', False):
+            args['image'] = self.params['auth_registry'] + '/' + image
+        return args
+
+    def check_image(self):
+        try:
+            image = self.pc.images.get(self.params.get('image'))
+            return image.attrs
+        except APIError as e:
+            if e.status_code == 404:
+                return {}
+            else:
+                self.module.fail_json(
+                    failed=True,
+                    msg="Internal error: {}".format(
+                        e.explanation
+                    )
+                )
+
+    def check_volume(self):
+        try:
+            vol = self.pc.volumes.get(self.params.get('name'))
+            return vol.attrs
+        except APIError as e:
+            if e.status_code == 404:
+                return {}
+
+    def check_container(self):
+        name = self.params.get("name")
+        for cont in self.pc.containers.list(all=True):
+            cont.reload()
+            if name == cont.name:
+                return cont
+
+    def get_container_info(self):
+        container = self.check_container()
+        if not container:
+            return None
+
+        return container.attrs
+
+    def compare_container(self):
+        container = self.check_container()
+        if (not container or
+                self.check_container_differs() or
+                self.compare_config() or
+                self.systemd.check_unit_change()):
+            self.changed = True
+        return self.changed
+
+    def compare_pid_mode(self, container_info):
+        new_pid_mode = self.params.get('pid_mode') or self.params.get('pid')
+        current_pid_mode = container_info['HostConfig'].get('PidMode')
+
+        if not current_pid_mode:
+            current_pid_mode = None
+
+        # podman default pid_mode
+        if new_pid_mode is None and current_pid_mode == 'private':
+            return False
+
+        if new_pid_mode != current_pid_mode:
+            return True
+
+    def compare_image(self, container_info=None):
+        def parse_tag(tag):
+            splits = tag.rsplit('/', 1)
+            return splits[-1]
+
+        container_info = container_info or self.get_container_info()
+        if not container_info:
+            return True
+
+        new_image = self.check_image()
+        current_image = container_info['Image']
+        if not new_image:
+            return True
+        if new_image['Id'] != current_image:
+            return True
+        # compare name:tag
+        elif (parse_tag(self.params.get('image')) !=
+              parse_tag(container_info['Config']['Image'])):
+            return True
+
+    def compare_volumes(self, container_info):
+        def check_slash(string):
+            if not string:
+                return string
+            if string[-1] != '/':
+                return string + '/'
+            else:
+                return string
+
+        raw_volumes, binds = self.generate_volumes()
+        raw_vols, current_binds = self.generate_volumes(
+            container_info['HostConfig'].get('Binds'))
+
+        current_vols = [check_slash(vol) for vol in raw_vols if vol]
+        volumes = [check_slash(vol) for vol in raw_volumes if vol]
+
+        if not volumes:
+            volumes = list()
+        if not current_vols:
+            current_vols = list()
+        if not current_binds:
+            current_binds = list()
+
+        volumes.sort()
+        current_vols.sort()
+
+        if set(volumes).symmetric_difference(set(current_vols)):
+            return True
+
+        new_binds = list()
+        new_current_binds = list()
+        if binds:
+            for k, v in binds.items():
+                k = check_slash(k)
+                v['bind'] = check_slash(v['bind'])
+                new_binds.append(
+                    "{}:{}:{}".format(k, v['bind'], v['mode']))
+
+        if current_binds:
+            for k, v in current_binds.items():
+                k = check_slash(k)
+                v['bind'] = check_slash(v['bind'])
+                if 'ro' in v['mode']:
+                    v['mode'] = 'ro'
+                else:
+                    v['mode'] = 'rw'
+                new_current_binds.append(
+                    "{}:{}:{}".format(k, v['bind'], v['mode'][0:2]))
+
+        new_binds.sort()
+        new_current_binds.sort()
+
+        if set(new_binds).symmetric_difference(set(new_current_binds)):
+            return True
+
+    def compare_dimensions(self, container_info):
+        new_dimensions = self.params.get('dimensions')
+
+        # NOTE(mgoddard): The names used by Docker are inconsistent between
+        # configuration of a container's resources and the resources in
+        # container_info['HostConfig']. This provides a mapping between the
+        # two.
+        dimension_map = {
+            'mem_limit': 'Memory', 'mem_reservation': 'MemoryReservation',
+            'memswap_limit': 'MemorySwap', 'cpu_period': 'CpuPeriod',
+            'cpu_quota': 'CpuQuota', 'cpu_shares': 'CpuShares',
+            'cpuset_cpus': 'CpusetCpus', 'cpuset_mems': 'CpusetMems',
+            'kernel_memory': 'KernelMemory', 'blkio_weight': 'BlkioWeight',
+            'ulimits': 'Ulimits'}
+        unsupported = set(new_dimensions.keys()) - \
+            set(dimension_map.keys())
+        if unsupported:
+            self.module.exit_json(
+                failed=True, msg=repr("Unsupported dimensions"),
+                unsupported_dimensions=unsupported)
+        current_dimensions = container_info['HostConfig']
+        for key1, key2 in dimension_map.items():
+            # NOTE(mgoddard): If a resource has been explicitly requested,
+            # check for a match. Otherwise, ensure it is set to the default.
+            if key1 in new_dimensions:
+                if key1 == 'ulimits':
+                    if self.compare_ulimits(new_dimensions[key1],
+                                            current_dimensions[key2]):
+                        return True
+                elif new_dimensions[key1] != current_dimensions[key2]:
+                    return True
+            elif current_dimensions[key2]:
+                # The default values of all (except ulimits) currently
+                # supported resources are '' or 0 - both falsey.
+                return True
+
+    def compare_config(self):
+        try:
+            container = self.pc.containers.get(self.params['name'])
+            container.reload()
+            if container.status != 'running':
+                return True
+
+            rc, raw_output = container.exec_run(COMPARE_CONFIG_CMD,
+                                                user='root')
+        except APIError as e:
+            if e.is_client_error():
+                return True
+            else:
+                raise
+        # Exit codes:
+        # 0: not changed
+        # 1: changed
+        # else: error
+        if rc == 0:
+            return False
+        elif rc == 1:
+            return True
+        else:
+            raise Exception('Failed to compare container configuration: '
+                            'ExitCode: %s Message: %s' %
+                            (rc, raw_output.decode('utf-8')))
+
+    def pull_image(self):
+        args = self.prepare_image_args()
+        old_image = self.check_image()
+
+        try:
+            image = self.pc.images.pull(**args)
+
+            if image.attrs == {}:
+                self.module.fail_json(
+                    msg="The requested image does not exist: {}".format(
+                        self.params['image']),
+                    failed=True
+                )
+            self.changed = old_image != image.attrs
+        except APIError as e:
+            self.module.fail_json(
+                msg="Unknown error message: {}".format(
+                    str(e)),
+                failed=True
+            )
+
+    def remove_container(self):
+        self.changed |= self.systemd.remove_unit_file()
+        container = self.check_container()
+        if container:
+            try:
+                container.remove(force=True)
+            except APIError:
+                if self.check_container():
+                    raise
+
+    def build_ulimits(self, ulimits):
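+        # e.g. {'RLIMIT_NOFILE': {'soft': 1048576, 'hard': 1048576}} becomes
+        # [{'Name': 'RLIMIT_NOFILE', 'Soft': 1048576, 'Hard': 1048576}]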
+        ulimits_opt = []
+        for key, value in ulimits.items():
+            soft = value.get('soft')
+            hard = value.get('hard')
+            # Converted to simple dictionary instead of Ulimit type
+            ulimits_opt.append(dict(Name=key,
+                                    Soft=soft,
+                                    Hard=hard))
+        return ulimits_opt
+
+    def create_container(self):
+        args = self.prepare_container_args()
+        container = self.pc.containers.create(**args)
+        if container.attrs == {}:
+            data = container.to_dict()
+            self.module.fail_json(failed=True, msg="Creation failed", **data)
+        else:
+            self.changed |= self.systemd.create_unit_file()
+        return container
+
+    def recreate_or_restart_container(self):
+        strategy = self.params.get(
+            'environment', dict()).get('KOLLA_CONFIG_STRATEGY')
+
+        container = self.get_container_info()
+        if not container:
+            self.start_container()
+            return
+
+        if strategy == 'COPY_ONCE' or self.check_container_differs():
+            self.ensure_image()
+
+            self.stop_container()
+            self.remove_container()
+            self.start_container()
+
+        elif strategy == 'COPY_ALWAYS':
+            self.restart_container()
+
+    def start_container(self):
+        self.ensure_image()
+
+        container = self.check_container()
+        if container and self.check_container_differs():
+            self.stop_container()
+            self.remove_container()
+            container = self.check_container()
+
+        if not container:
+            self.create_container()
+            container = self.check_container()
+
+        if container.status != 'running':
+            self.changed = True
+            if self.params.get('restart_policy') == 'no':
+                container = self.check_container()
+                container.start()
+            else:
+                self.systemd.create_unit_file()
+                if not self.systemd.start():
+                    self.module.fail_json(
+                        changed=True,
+                        msg="Container timed out",
+                        **self.check_container().attrs)
+
+        if not self.params.get('detach'):
+            container = self.check_container()
+            rc = container.wait()
+
+            stdout = [line.decode() for line in container.logs(stdout=True,
+                      stderr=False)]
+            stderr = [line.decode() for line in container.logs(stdout=False,
+                      stderr=True)]
+
+            self.result['rc'] = rc
+            self.result['stdout'] = "\n".join(stdout) if len(stdout) else ""
+            self.result['stderr'] = "\n".join(stderr) if len(stderr) else ""
+
+            if self.params.get('remove_on_exit'):
+                self.stop_container()
+                self.remove_container()
+            if rc != 0:
+                self.module.fail_json(
+                    changed=True,
+                    msg="Container exited with non-zero return code %s" % rc,
+                    **self.result
+                )
+
+    def stop_container(self):
+        name = self.params.get('name')
+        graceful_timeout = self.params.get('graceful_timeout')
+        if not graceful_timeout:
+            graceful_timeout = 10
+        container = self.check_container()
+        if not container:
+            ignore_missing = self.params.get('ignore_missing')
+            if not ignore_missing:
+                self.module.fail_json(
+                    msg="No such container: {} to stop".format(name))
+        elif not (container.status == 'exited' or
+                  container.status == 'stopped'):
+            self.changed = True
+            if self.params.get('restart_policy') != 'no':
+                self.systemd.create_unit_file()
+                self.systemd.stop()
+            else:
+                container.stop(timeout=str(graceful_timeout))
+
+    def stop_and_remove_container(self):
+        container = self.check_container()
+
+        if container:
+            self.stop_container()
+            self.remove_container()
+
+    def restart_container(self):
+        container = self.check_container()
+
+        if not container:
+            self.module.fail_json(
+                msg="No such container: {}".format(self.params.get('name'))
+            )
+        else:
+            self.changed = True
+            self.systemd.create_unit_file()
+
+            if not self.systemd.restart():
+                self.module.fail_json(
+                    changed=True,
+                    msg="Container timed out",
+                    **container.attrs)
+
+    def create_volume(self):
+        if not self.check_volume():
+            self.changed = True
+            args = dict(
+                name=self.params.get('name'),
+                driver='local'
+            )
+
+            vol = self.pc.volumes.create(**args)
+            self.result = vol.attrs
+
+    def remove_volume(self):
+        if self.check_volume():
+            self.changed = True
+            try:
+                self.pc.volumes.remove(self.params.get('name'))
+            except APIError as e:
+                if e.status_code == 409:
+                    self.module.fail_json(
+                        failed=True,
+                        msg="Volume named '{}' is currently in-use".format(
+                            self.params.get('name')
+                        )
+                    )
+                else:
+                    self.module.fail_json(
+                        failed=True,
+                        msg="Internal error: {}".format(
+                            e.explanation
+                        )
+                    )
+                raise
+
+    def remove_image(self):
+        if self.check_image():
+            image = self.pc.images.get(self.params['image'])
+            self.changed = True
+            try:
+                image.remove()
+            except APIError as e:
+                if e.status_code == 409:
+                    self.module.fail_json(
+                        failed=True,
+                        msg="Image '{}' is currently in-use".format(
+                            self.params.get('image')
+                        )
+                    )
+                else:
+                    self.module.fail_json(
+                        failed=True,
+                        msg="Internal error: {}".format(
+                            str(e)
+                        )
+                    )
+                raise
+
+    def ensure_image(self):
+        if not self.check_image():
+            self.pull_image()
diff --git a/ansible/module_utils/kolla_systemd_worker.py b/ansible/module_utils/kolla_systemd_worker.py
index 72c50b316407dd31ca913e21fd82b03cb71428c2..58cd1338bf1672af741e2356351a13eeefa7967a 100644
--- a/ansible/module_utils/kolla_systemd_worker.py
+++ b/ansible/module_utils/kolla_systemd_worker.py
@@ -21,15 +21,15 @@ TEMPLATE = '''# ${service_name}
 # autogenerated by Kolla-Ansible
 
 [Unit]
-Description=docker ${service_name}
-After=docker.service
-Requires=docker.service
-StartLimitIntervalSec=${restart_timeout}
+Description=${engine} ${service_name}
+After=${deps}
+Requires=${deps}
+StartLimitInterval=${restart_timeout}
 StartLimitBurst=${restart_retries}
 
 [Service]
-ExecStart=/usr/bin/docker start -a ${name}
-ExecStop=/usr/bin/docker stop ${name} -t ${graceful_timeout}
+ExecStart=/usr/bin/${engine} start -a ${name}
+ExecStop=/usr/bin/${engine} stop ${name} -t ${graceful_timeout}
 Restart=${restart_policy}
 RestartSec=${restart_duration}
 
@@ -46,6 +46,12 @@ class SystemdWorker(object):
         if not name:
             return None
 
+        container_engine = params.get('container_engine')
+        if container_engine == 'docker':
+            dependencies = 'docker.service'
+        else:
+            dependencies = 'network-online.target'
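+        # NOTE: podman is daemonless, so there is no engine service to order
+        # against; the unit only waits for the network to come up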
+
         restart_policy = params.get('restart_policy', 'no')
         if restart_policy == 'unless-stopped':
             restart_policy = 'always'
@@ -62,8 +68,8 @@ class SystemdWorker(object):
         self.container_dict = dict(
             name=name,
             service_name='kolla-' + name + '-container.service',
-            engine='docker',
-            deps='docker.service',
+            engine=container_engine,
+            deps=dependencies,
             graceful_timeout=params.get('graceful_timeout'),
             restart_policy=restart_policy,
             restart_timeout=restart_timeout,
diff --git a/ansible/roles/ceilometer/defaults/main.yml b/ansible/roles/ceilometer/defaults/main.yml
index d9024926eb519d16f0669f27e07f8b57df742bf1..febb5070e387bed94b2fdcf509abdd26fb2f8539 100644
--- a/ansible/roles/ceilometer/defaults/main.yml
+++ b/ansible/roles/ceilometer/defaults/main.yml
@@ -22,7 +22,7 @@ ceilometer_services:
     enabled: True
     privileged: True
     image: "{{ ceilometer_compute_image_full }}"
-    volumes: "{{ ceilometer_compute_default_volumes + ceilometer_compute_extra_volumes }}"
+    volumes: "{{ ceilometer_compute_default_volumes + ceilometer_compute_extra_volumes + lookup('vars', 'run_default_volumes_' + kolla_container_engine) }}"
     dimensions: "{{ ceilometer_compute_dimensions }}"
     healthcheck: "{{ ceilometer_compute_healthcheck }}"
   ceilometer-ipmi:
@@ -136,7 +136,7 @@ ceilometer_compute_default_volumes:
   - "{{ node_config_directory }}/ceilometer-compute/:{{ container_config_directory }}/:ro"
   - "/etc/localtime:/etc/localtime:ro"
   - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
-  - "/run/:/run/:shared"
+  - "/run:/run{{ ':shared' if kolla_container_engine == 'docker' else '' }}"
   - "ceilometer:/var/lib/ceilometer/"
   - "kolla_logs:/var/log/kolla/"
   - "{{ ceilometer_libvirt_volume }}:/var/lib/libvirt"
diff --git a/ansible/roles/cinder/defaults/main.yml b/ansible/roles/cinder/defaults/main.yml
index 0d88b8f57068f6d4d8a06afb61d75cba91ed0dee..1a4440bc5f823cf099ea95fe06ce65e0c6c47eb9 100644
--- a/ansible/roles/cinder/defaults/main.yml
+++ b/ansible/roles/cinder/defaults/main.yml
@@ -40,7 +40,7 @@ cinder_services:
     privileged: True
     ipc_mode: "host"
     tmpfs: "{{ cinder_volume_tmpfs }}"
-    volumes: "{{ cinder_volume_default_volumes + cinder_volume_extra_volumes }}"
+    volumes: "{{ cinder_volume_default_volumes + cinder_volume_extra_volumes + lookup('vars', 'run_default_volumes_' + kolla_container_engine) }}"
     dimensions: "{{ cinder_volume_dimensions }}"
     healthcheck: "{{ cinder_volume_healthcheck }}"
   cinder-backup:
@@ -49,7 +49,7 @@ cinder_services:
     enabled: "{{ enable_cinder_backup | bool }}"
     image: "{{ cinder_backup_image_full }}"
     privileged: True
-    volumes: "{{ cinder_backup_default_volumes + cinder_backup_extra_volumes }}"
+    volumes: "{{ cinder_backup_default_volumes + cinder_backup_extra_volumes + lookup('vars', 'run_default_volumes_' + kolla_container_engine) }}"
     dimensions: "{{ cinder_backup_dimensions }}"
     healthcheck: "{{ cinder_backup_healthcheck }}"
 
@@ -171,7 +171,7 @@ cinder_backup_default_volumes:
   - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
   - "/dev/:/dev/"
   - "/lib/modules:/lib/modules:ro"
-  - "/run/:/run/:shared"
+  - "/run:/run{{ ':shared' if kolla_container_engine == 'docker' else '' }}"
   - "cinder:/var/lib/cinder"
   - "{% if enable_iscsid | bool %}iscsi_info:/etc/iscsi{% endif %}"
   - "kolla_logs:/var/log/kolla/"
@@ -188,7 +188,7 @@ cinder_volume_default_volumes:
   - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
   - "/dev/:/dev/"
   - "/lib/modules:/lib/modules:ro"
-  - "/run/:/run/:shared"
+  - "/run:/run{{ ':shared' if kolla_container_engine == 'docker' else '' }}"
   - "cinder:/var/lib/cinder"
   - "{% if enable_iscsid | bool %}iscsi_info:/etc/iscsi{% endif %}"
   - "{% if enable_cinder_backend_lvm | bool and cinder_target_helper == 'lioadm' %}target_config:/etc/target{% endif %}"
diff --git a/ansible/roles/common/defaults/main.yml b/ansible/roles/common/defaults/main.yml
index 7071b4f1403b16335dc991ab9ae5f0e1abb9b396..cd2449ece498321495211aaf7c99a2a52dfeaef6 100644
--- a/ansible/roles/common/defaults/main.yml
+++ b/ansible/roles/common/defaults/main.yml
@@ -18,7 +18,7 @@ common_services:
       ANSIBLE_NOCOLOR: "1"
       ANSIBLE_LIBRARY: "/usr/share/ansible"
     privileged: True
-    volumes: "{{ kolla_toolbox_default_volumes + kolla_toolbox_extra_volumes }}"
+    volumes: "{{ kolla_toolbox_default_volumes + kolla_toolbox_extra_volumes + lookup('vars', 'run_default_volumes_' + kolla_container_engine) }}"
     dimensions: "{{ kolla_toolbox_dimensions }}"
   cron:
     container_name: cron
@@ -107,7 +107,7 @@ kolla_toolbox_default_volumes:
   - "/etc/localtime:/etc/localtime:ro"
   - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
   - "/dev/:/dev/"
-  - "/run/:/run/:shared"
+  - "/run/:/run/{{ ':shared' if kolla_container_engine == 'docker' else '' }}"   # see: https://github.com/containers/podman/issues/16305
   - "kolla_logs:/var/log/kolla/"
 cron_default_volumes:
   - "{{ node_config_directory }}/cron/:{{ container_config_directory }}/:ro"
diff --git a/ansible/roles/common/tasks/bootstrap.yml b/ansible/roles/common/tasks/bootstrap.yml
index eb824bf6525a9ae534cf51e24672df73fe205a3b..11777b722af0d34520658565e77ecb7f0d275770 100644
--- a/ansible/roles/common/tasks/bootstrap.yml
+++ b/ansible/roles/common/tasks/bootstrap.yml
@@ -10,7 +10,7 @@
 - name: Link kolla_logs volume to /var/log/kolla
   become: true
   file:
-    src: "{{ docker_runtime_directory or '/var/lib/docker' }}/volumes/kolla_logs/_data"
+    src: "{{ container_engine_volumes_path }}/kolla_logs/_data"
     path: /var/log/kolla
     state: link
   when: inventory_hostname in groups['kolla-logs']
diff --git a/ansible/roles/common/tasks/config.yml b/ansible/roles/common/tasks/config.yml
index 77213486f19d5cfd9f749b1a003b2245d51d547e..810ccc571fe145f5a1d2a78f27557eb6608e4969 100644
--- a/ansible/roles/common/tasks/config.yml
+++ b/ansible/roles/common/tasks/config.yml
@@ -27,6 +27,18 @@
   when:
     - kolla_copy_ca_into_containers | bool
 
+- name: Copying over /run subdirectories conf
+  become: true
+  template:
+    src: kolla-directories.conf.j2
+    dest: /etc/tmpfiles.d/kolla.conf
+  when: kolla_container_engine == 'podman'
+
+- name: Creating /run subdirectories with systemd-tmpfiles
+  become: true
+  command: systemd-tmpfiles --create
+  when: kolla_container_engine == 'podman'
+
 - name: Copying over config.json files for services
   template:
     src: "{{ item.key }}.json.j2"
diff --git a/ansible/roles/common/templates/kolla-directories.conf.j2 b/ansible/roles/common/templates/kolla-directories.conf.j2
new file mode 100644
index 0000000000000000000000000000000000000000..3831b21065be56a48b5b21a0dabf196a78d131b8
--- /dev/null
+++ b/ansible/roles/common/templates/kolla-directories.conf.j2
@@ -0,0 +1,3 @@
+{% for path in run_default_subdirectories %}
+d {{ path }} 0755 root root - -
+{% endfor %}
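+{# each subdirectory renders as a tmpfiles.d line, e.g. "d /run/netns 0755 root root - -" #}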
diff --git a/ansible/roles/ironic/defaults/main.yml b/ansible/roles/ironic/defaults/main.yml
index d34d3d86a4f4a3a03e79c2e3586b304aa36585f2..2cbdf834ed694a76307ebe221812a169a0ede41f 100644
--- a/ansible/roles/ironic/defaults/main.yml
+++ b/ansible/roles/ironic/defaults/main.yml
@@ -30,7 +30,7 @@ ironic_services:
     enabled: true
     image: "{{ ironic_conductor_image_full }}"
     privileged: True
-    volumes: "{{ ironic_conductor_default_volumes + ironic_conductor_extra_volumes }}"
+    volumes: "{{ ironic_conductor_default_volumes + ironic_conductor_extra_volumes + lookup('vars', 'run_default_volumes_' + kolla_container_engine) }}"
     dimensions: "{{ ironic_conductor_dimensions }}"
     healthcheck: "{{ ironic_conductor_healthcheck }}"
   ironic-inspector:
@@ -230,7 +230,7 @@ ironic_conductor_default_volumes:
   - "/lib/modules:/lib/modules:ro"
   - "/sys:/sys"
   - "/dev:/dev"
-  - "/run:/run:shared"
+  - "/run:/run{{ ':shared' if kolla_container_engine == 'docker' else '' }}"
   - "kolla_logs:/var/log/kolla"
   - "ironic:/var/lib/ironic"
   - "{{ kolla_dev_repos_directory ~ '/ironic/ironic:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/ironic' if ironic_dev_mode | bool else '' }}"
diff --git a/ansible/roles/iscsi/defaults/main.yml b/ansible/roles/iscsi/defaults/main.yml
index 688f94cb2cf2b6f0638dedbd8f93ef066ebb51f7..b4bbe845ad25b1be3be4d25d0d960188bf95426c 100644
--- a/ansible/roles/iscsi/defaults/main.yml
+++ b/ansible/roles/iscsi/defaults/main.yml
@@ -7,7 +7,7 @@ iscsi_services:
     image: "{{ iscsid_image_full }}"
     ipc_mode: "host"
     privileged: True
-    volumes: "{{ iscsid_default_volumes + iscsid_extra_volumes }}"
+    volumes: "{{ iscsid_default_volumes + iscsid_extra_volumes + lookup('vars', 'run_default_volumes_' + kolla_container_engine) }}"
     dimensions: "{{ iscsid_dimensions }}"
   tgtd:
     container_name: tgtd
@@ -16,7 +16,7 @@ iscsi_services:
     image: "{{ tgtd_image_full }}"
     ipc_mode: "host"
     privileged: True
-    volumes: "{{ tgtd_default_volumes + tgtd_extra_volumes }}"
+    volumes: "{{ tgtd_default_volumes + tgtd_extra_volumes + lookup('vars', 'run_default_volumes_' + kolla_container_engine) }}"
     dimensions: "{{ tgtd_dimensions }}"
 
 tgtd_interface_address: "{{ api_interface_address }}"
@@ -43,7 +43,7 @@ iscsid_default_volumes:
   - "/etc/localtime:/etc/localtime:ro"
   - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
   - "/dev/:/dev/"
-  - "/run/:/run/:shared"
+  - "/run:/run{{ ':shared' if kolla_container_engine == 'docker' else '' }}"
   - "/lib/modules:/lib/modules:ro"
   - "/sys/kernel/config:/configfs"
   - "iscsi_info:/etc/iscsi"
@@ -53,7 +53,7 @@ tgtd_default_volumes:
   - "/etc/localtime:/etc/localtime:ro"
   - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
   - "/dev/:/dev/"
-  - "/run/:/run/:shared"
+  - "/run:/run{{ ':shared' if kolla_container_engine == 'docker' else '' }}"
   - "/lib/modules:/lib/modules:ro"
   - "/sys/kernel/config:/configfs"
 iscsid_extra_volumes: "{{ default_extra_volumes }}"
diff --git a/ansible/roles/kuryr/defaults/main.yml b/ansible/roles/kuryr/defaults/main.yml
index 0a2cbfa6213b30a8ee44059cd91f6486c4dc27ec..6403028861e03ea25062fa0f280d7b6431450dc8 100644
--- a/ansible/roles/kuryr/defaults/main.yml
+++ b/ansible/roles/kuryr/defaults/main.yml
@@ -16,7 +16,7 @@ kuryr_services:
     privileged: True
     cap_add:
       - NET_ADMIN
-    volumes: "{{ kuryr_default_volumes + kuryr_extra_volumes }}"
+    volumes: "{{ kuryr_default_volumes + kuryr_extra_volumes + lookup('vars', 'run_default_volumes_' + kolla_container_engine) }}"
     dimensions: "{{ kuryr_dimensions }}"
     healthcheck: "{{ kuryr_healthcheck }}"
 
@@ -52,7 +52,7 @@ kuryr_default_volumes:
   - "/etc/localtime:/etc/localtime:ro"
   - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
   - "/lib/modules:/lib/modules:ro"
-  - "/run:/run:shared"
+  - "/run:/run{{ ':shared' if kolla_container_engine == 'docker' else '' }}"
   - "/usr/lib/docker:/usr/lib/docker"
   - "{{ kolla_dev_repos_directory ~ '/kuryr/kuryr:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/kuryr' if kuryr_dev_mode | bool else '' }}"
   - "{{ kolla_dev_repos_directory ~ '/kuryr-libnetwork/kuryr_libnetwork:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/kuryr_libnetwork' if kuryr_dev_mode | bool else '' }}"
diff --git a/ansible/roles/manila/defaults/main.yml b/ansible/roles/manila/defaults/main.yml
index 0ac9a24d8986e196c7a833862d36d65a12277686..6244997ed1723b4faea3628a50fb9dd6b75cffeb 100644
--- a/ansible/roles/manila/defaults/main.yml
+++ b/ansible/roles/manila/defaults/main.yml
@@ -36,7 +36,7 @@ manila_services:
     image: "{{ manila_share_image_full }}"
     enabled: True
     privileged: True
-    volumes: "{{ manila_share_default_volumes + manila_share_extra_volumes }}"
+    volumes: "{{ manila_share_default_volumes + manila_share_extra_volumes + lookup('vars', 'run_default_volumes_' + kolla_container_engine) }}"
     dimensions: "{{ manila_share_dimensions }}"
     healthcheck: "{{ manila_share_healthcheck }}"
   manila-data:
@@ -45,7 +45,7 @@ manila_services:
     image: "{{ manila_data_image_full }}"
     enabled: True
     privileged: True
-    volumes: "{{ manila_data_default_volumes + manila_data_extra_volumes }}"
+    volumes: "{{ manila_data_default_volumes + manila_data_extra_volumes + lookup('vars', 'run_default_volumes_' + kolla_container_engine) }}"
     dimensions: "{{ manila_data_dimensions }}"
     healthcheck: "{{ manila_data_healthcheck }}"
 
@@ -159,7 +159,7 @@ manila_share_default_volumes:
   - "{{ node_config_directory }}/manila-share/:{{ container_config_directory }}/:ro"
   - "/etc/localtime:/etc/localtime:ro"
   - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
-  - "/run/:/run/:shared"
+  - "/run:/run{{ ':shared' if kolla_container_engine == 'docker' else '' }}"
   - "kolla_logs:/var/log/kolla/"
   - "/lib/modules:/lib/modules:ro"
   - "{{ kolla_dev_repos_directory ~ '/manila/manila:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/manila' if manila_dev_mode | bool else '' }}"
@@ -180,7 +180,7 @@ manila_data_default_volumes:
   - "/etc/localtime:/etc/localtime:ro"
   - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
   - "/dev/:/dev/"
-  - "/run/:/run/:shared"
+  - "/run:/run{{ ':shared' if kolla_container_engine == 'docker' else '' }}"
   - "kolla_logs:/var/log/kolla/"
   - "{{ kolla_dev_repos_directory ~ '/manila/manila:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/manila' if manila_dev_mode | bool else '' }}"
 
diff --git a/ansible/roles/mariadb/tasks/recover_cluster.yml b/ansible/roles/mariadb/tasks/recover_cluster.yml
index a2a47a4087234a15b0742a5ec89287b748d0cf17..fc3f1c600aae499eaabfa83dcf0c6ea0a3a58b5d 100644
--- a/ansible/roles/mariadb/tasks/recover_cluster.yml
+++ b/ansible/roles/mariadb/tasks/recover_cluster.yml
@@ -20,6 +20,7 @@
       kolla_docker:
         name: "{{ mariadb_service.container_name }}"
         action: "stop_container"
+        common_options: "{{ docker_common_options }}"
         ignore_missing: true
 
     # Run wsrep recovery with detach=false to block until completion. Use a
@@ -42,7 +43,7 @@
 
     - name: Copying MariaDB log file to /tmp
       become: true
-      command: "cp {{ docker_runtime_directory or '/var/lib/docker' }}/volumes/kolla_logs/_data/mariadb/mariadb.log /tmp/mariadb_tmp.log"
+      command: "cp {{ container_engine_volumes_path }}/kolla_logs/_data/mariadb/mariadb.log /tmp/mariadb_tmp.log"
 
     # Look for sequence number in logs. Format is:
     # WSREP: Recovered position: <UUID>:<seqno>.
@@ -99,7 +100,7 @@
   become: true
   lineinfile:
     create: yes
-    dest: "{{ docker_runtime_directory or '/var/lib/docker' }}/volumes/mariadb/_data/grastate.dat"
+    dest: "{{ container_engine_volumes_path }}/mariadb/_data/grastate.dat"
     regexp: 'safe_to_bootstrap:(.*)$'
     line: 'safe_to_bootstrap: 1'
     state: present
diff --git a/ansible/roles/multipathd/defaults/main.yml b/ansible/roles/multipathd/defaults/main.yml
index 0d098d9debd458fa6e919638dc8b1b3c814ccdd5..5db6d1055910b4df966d3b57e0c6f01b268bf24f 100644
--- a/ansible/roles/multipathd/defaults/main.yml
+++ b/ansible/roles/multipathd/defaults/main.yml
@@ -7,7 +7,7 @@ multipathd_services:
     ipc_mode: "host"
     privileged: True
     image: "{{ multipathd_image_full }}"
-    volumes: "{{ multipathd_default_volumes + multipathd_extra_volumes }}"
+    volumes: "{{ multipathd_default_volumes + multipathd_extra_volumes + lookup('vars', 'run_default_volumes_' + kolla_container_engine) }}"
 
 
 ####################
@@ -23,7 +23,7 @@ multipathd_default_volumes:
   - "/etc/localtime:/etc/localtime:ro"
   - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
   - "/dev/:/dev/"
-  - "/run/:/run/:shared"
+  - "/run:/run{{ ':shared' if kolla_container_engine == 'docker' else '' }}"
   - "/lib/modules:/lib/modules:ro"
   - "/sys/kernel/config:/configfs"
 multipathd_extra_volumes: "{{ default_extra_volumes }}"
diff --git a/ansible/roles/nova-cell/defaults/main.yml b/ansible/roles/nova-cell/defaults/main.yml
index 2fade0e2b6449e2d0e93df9cd73e31a01b24bd54..4e07db6e89d36bb35efafc17d7bdda647d40cae8 100644
--- a/ansible/roles/nova-cell/defaults/main.yml
+++ b/ansible/roles/nova-cell/defaults/main.yml
@@ -8,7 +8,7 @@ nova_cell_services:
     pid_mode: "host"
     cgroupns_mode: "host"
     privileged: True
-    volumes: "{{ nova_libvirt_default_volumes + nova_libvirt_extra_volumes }}"
+    volumes: "{{ nova_libvirt_default_volumes + nova_libvirt_extra_volumes + lookup('vars', 'run_default_volumes_' + kolla_container_engine) }}"
     dimensions: "{{ nova_libvirt_dimensions }}"
     healthcheck: "{{ nova_libvirt_healthcheck }}"
   nova-ssh:
@@ -59,7 +59,7 @@ nova_cell_services:
     privileged: True
     enabled: "{{ not enable_nova_fake | bool }}"
     ipc_mode: "host"
-    volumes: "{{ nova_compute_default_volumes + nova_compute_extra_volumes }}"
+    volumes: "{{ nova_compute_default_volumes + nova_compute_extra_volumes + lookup('vars', 'run_default_volumes_' + kolla_container_engine) }}"
     dimensions: "{{ nova_compute_dimensions }}"
     healthcheck: "{{ nova_compute_healthcheck }}"
   nova-compute-ironic:
@@ -371,8 +371,9 @@ nova_libvirt_default_volumes:
   - "/etc/localtime:/etc/localtime:ro"
   - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
   - "/lib/modules:/lib/modules:ro"
-  - "/run/:/run/:shared"
+  - "/run:/run{{ ':shared' if kolla_container_engine == 'docker' else '' }}"
   - "/dev:/dev"
+  - "{{ 'devpts:/dev/pts' if kolla_container_engine == 'podman' else '' }}"
   - "/sys/fs/cgroup:/sys/fs/cgroup"
   - "kolla_logs:/var/log/kolla/"
   - "libvirtd:/var/lib/libvirt"
@@ -418,7 +419,7 @@ nova_compute_default_volumes:
   - "/etc/localtime:/etc/localtime:ro"
   - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
   - "/lib/modules:/lib/modules:ro"
-  - "/run:/run:shared"
+  - "/run:/run{{ ':shared' if kolla_container_engine == 'docker' else '' }}"
   - "/dev:/dev"
   - "kolla_logs:/var/log/kolla/"
   - "{% if enable_iscsid | bool %}iscsi_info:/etc/iscsi{% endif %}"
@@ -439,6 +440,7 @@ nova_cell_bootstrap_default_volumes:
   - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
   - "kolla_logs:/var/log/kolla/"
   - "{{ kolla_dev_repos_directory ~ '/nova/nova:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/nova' if nova_dev_mode | bool else '' }}"
+  - "{{ 'nova-cell:/var/lib/script/' if kolla_container_engine == 'podman' else '' }}"
 
 nova_extra_volumes: "{{ default_extra_volumes }}"
 nova_libvirt_extra_volumes: "{{ nova_extra_volumes }}"
diff --git a/ansible/roles/nova-cell/handlers/main.yml b/ansible/roles/nova-cell/handlers/main.yml
index 5180007b5c4607d48bf464675b7102c246307625..5ad5dc9800ef8e0d12c0a77d45d44e4f5bb84357 100644
--- a/ansible/roles/nova-cell/handlers/main.yml
+++ b/ansible/roles/nova-cell/handlers/main.yml
@@ -198,7 +198,12 @@
       - "{{ node_config_directory }}/nova-compute-fake-{{ item }}/:{{ container_config_directory }}/:ro"
       - "/etc/localtime:/etc/localtime:ro"
       - "/lib/modules:/lib/modules:ro"
-      - "/run:/run:shared"
+      - "/run:/run{{ ':shared' if kolla_container_engine == 'docker' else '' }}"
+      - "/run/netns:/run/netns:shared"
+      - "/run/lock/nova:/run/lock/nova:shared"
+      - "/run/libvirt:/run/libvirt:shared"
+      - "/run/nova:/run/nova:shared"
+      - "/run/openvswitch:/run/openvswitch:shared"
       - "kolla_logs:/var/log/kolla/"
   with_sequence: start=1 end={{ num_nova_fake_per_node }}
   when:
diff --git a/ansible/roles/nova-cell/tasks/config-nova-fake.yml b/ansible/roles/nova-cell/tasks/config-nova-fake.yml
index 17c6381e1501c239db3ee03868c244846401f116..be69093d12b87f638f9f11a04128260634a78ceb 100644
--- a/ansible/roles/nova-cell/tasks/config-nova-fake.yml
+++ b/ansible/roles/nova-cell/tasks/config-nova-fake.yml
@@ -57,7 +57,12 @@
       - "{{ node_config_directory }}/nova-compute-fake-{{ item }}/:{{ container_config_directory }}/:ro"
       - "/etc/localtime:/etc/localtime:ro"
       - "/lib/modules:/lib/modules:ro"
-      - "/run:/run:shared"
+      - "/run:/run{{ ':shared' if kolla_container_engine == 'docker' else '' }}"
+      - "/run/netns:/run/netns:shared"
+      - "/run/lock/nova:/run/lock/nova:shared"
+      - "/run/libvirt:/run/libvirt:shared"
+      - "/run/nova:/run/nova:shared"
+      - "/run/openvswitch:/run/openvswitch:shared"
       - "kolla_logs:/var/log/kolla/"
   with_sequence: start=1 end={{ num_nova_fake_per_node }}
   when:
diff --git a/ansible/roles/openvswitch/tasks/config-host.yml b/ansible/roles/openvswitch/tasks/config-host.yml
index 18dae6a073061da6e5c3f3ab3b871bd827199ec5..dfd4f8cff44be4024c9b2e968a4da8369925c7fb 100644
--- a/ansible/roles/openvswitch/tasks/config-host.yml
+++ b/ansible/roles/openvswitch/tasks/config-host.yml
@@ -5,3 +5,16 @@
   vars:
     modules:
       - {'name': openvswitch}
+
+# NOTE(m.hiner): Podman considers a non-existent mount directory
+# to be an error, so it has to be created beforehand.
+# See: https://github.com/containers/podman/issues/14781
+- name: Create /run/openvswitch directory on host
+  become: True
+  file:
+    path: /run/openvswitch
+    state: directory
+    mode: "0770"
+    owner: "{{ config_owner_user }}"
+    group: "{{ config_owner_group }}"
+  when: kolla_container_engine == 'podman'
diff --git a/ansible/roles/prechecks/tasks/service_checks.yml b/ansible/roles/prechecks/tasks/service_checks.yml
index 1fdae59cef61fe1c9ff4937702141a44a7ee3e56..0400e07271a9bbdd8b1ff4670424741d875250b3 100644
--- a/ansible/roles/prechecks/tasks/service_checks.yml
+++ b/ansible/roles/prechecks/tasks/service_checks.yml
@@ -12,7 +12,9 @@
   register: result
   changed_when: false
   check_mode: false
-  when: inventory_hostname in groups['baremetal']
+  when:
+    - kolla_container_engine == 'docker'
+    - inventory_hostname in groups['baremetal']
   failed_when: result is failed
                or result.stdout | regex_replace('.*\\b(\\d+\\.\\d+\\.\\d+)\\b.*', '\\1') is version(docker_version_min, '<')
 
diff --git a/ansible/roles/prune-images/tasks/prune_images.yml b/ansible/roles/prune-images/tasks/prune_images.yml
index 154e5e5f04f40d50041e1f19d58c3d20a137561c..c1cea204fdadc8ee40d084c7ed31b28612d5bb87 100644
--- a/ansible/roles/prune-images/tasks/prune_images.yml
+++ b/ansible/roles/prune-images/tasks/prune_images.yml
@@ -6,3 +6,11 @@
     images_filters:
       label: kolla_version
     timeout: "{{ docker_image_prune_timeout }}"
+  when: kolla_container_engine == 'docker'
+
+# NOTE(m.hiner): Podman does not (yet?) have an equivalent of docker_prune
+# and the generic podman_image module does not support label filters.
+- name: Pruning Kolla images
+  become: true
+  command: podman image prune --force --filter 'label=kolla_version'
+  when: kolla_container_engine == 'podman'
diff --git a/ansible/roles/sahara/defaults/main.yml b/ansible/roles/sahara/defaults/main.yml
index 100cc888e3a55d65d011670761c6251788010efc..c22a6a16b63898316d323bd0c1ff33fdbdb1cdf7 100644
--- a/ansible/roles/sahara/defaults/main.yml
+++ b/ansible/roles/sahara/defaults/main.yml
@@ -28,7 +28,7 @@ sahara_services:
     enabled: true
     image: "{{ sahara_engine_image_full }}"
     privileged: True
-    volumes: "{{ sahara_engine_default_volumes + sahara_engine_extra_volumes }}"
+    volumes: "{{ sahara_engine_default_volumes + sahara_engine_extra_volumes + lookup('vars', 'run_default_volumes_' + kolla_container_engine) }}"
     dimensions: "{{ sahara_engine_dimensions }}"
     healthcheck: "{{ sahara_engine_healthcheck }}"
 
@@ -115,7 +115,7 @@ sahara_engine_default_volumes:
   - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
   - "sahara:/var/lib/sahara/"
   - "kolla_logs:/var/log/kolla/"
-  - "/run:/run:shared"
+  - "/run:/run{{ ':shared' if kolla_container_engine == 'docker' else '' }}"
   - "{{ kolla_dev_repos_directory ~ '/sahara/sahara:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/sahara' if sahara_dev_mode | bool else '' }}"
 
 sahara_extra_volumes: "{{ default_extra_volumes }}"
diff --git a/ansible/roles/zun/defaults/main.yml b/ansible/roles/zun/defaults/main.yml
index 1fa74da4e98e98b96e6d9473d18069eee06f5035..98431f8f0cc23da0b55a9ec3efe86cc59cc2fd60 100644
--- a/ansible/roles/zun/defaults/main.yml
+++ b/ansible/roles/zun/defaults/main.yml
@@ -47,7 +47,7 @@ zun_services:
     enabled: true
     image: "{{ zun_compute_image_full }}"
     privileged: True
-    volumes: "{{ zun_compute_default_volumes + zun_compute_extra_volumes }}"
+    volumes: "{{ zun_compute_default_volumes + zun_compute_extra_volumes + lookup('vars', 'run_default_volumes_' + kolla_container_engine) }}"
     dimensions: "{{ zun_compute_dimensions }}"
     healthcheck: "{{ zun_compute_healthcheck }}"
   zun-cni-daemon:
@@ -56,7 +56,7 @@ zun_services:
     enabled: true
     image: "{{ zun_cni_daemon_image_full }}"
     privileged: True
-    volumes: "{{ zun_cni_daemon_default_volumes + zun_cni_daemon_extra_volumes }}"
+    volumes: "{{ zun_cni_daemon_default_volumes + zun_cni_daemon_extra_volumes + lookup('vars', 'run_default_volumes_' + kolla_container_engine) }}"
     dimensions: "{{ zun_cni_daemon_dimensions }}"
     healthcheck: "{{ zun_cni_daemon_healthcheck }}"
 
@@ -185,7 +185,7 @@ zun_compute_default_volumes:
   - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
   - "kolla_logs:/var/log/kolla/"
   - "{{ kolla_dev_repos_directory ~ '/zun/zun:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/zun' if zun_dev_mode | bool else '' }}"
-  - "/run:/run:shared"
+  - "/run:/run{{ ':shared' if kolla_container_engine == 'docker' else '' }}"
   - "/usr/lib/docker:/usr/lib/docker"
   - "/var/lib/docker:/var/lib/docker"
   - "/lib/modules:/lib/modules:ro"
@@ -198,7 +198,7 @@ zun_cni_daemon_default_volumes:
   - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
   - "kolla_logs:/var/log/kolla/"
   - "{{ kolla_dev_repos_directory ~ '/zun/zun:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/zun' if zun_dev_mode | bool else '' }}"
-  - "/run:/run:shared"
+  - "/run:/run{{ ':shared' if kolla_container_engine == 'docker' else '' }}"
 
 zun_extra_volumes: "{{ default_extra_volumes }}"
 zun_api_extra_volumes: "{{ zun_extra_volumes }}"
diff --git a/etc/kolla/globals.yml b/etc/kolla/globals.yml
index 71f1ea0a353af504ad04497a9d0040d609c08e68..4b3424fce30b46eace26ba76782ff925154a31b3 100644
--- a/etc/kolla/globals.yml
+++ b/etc/kolla/globals.yml
@@ -79,12 +79,13 @@ workaround_ansible_issue_8743: yes
 # Optionally change the path to sysctl.conf modified by Kolla Ansible plays.
 #kolla_sysctl_conf_path: /etc/sysctl.conf
 
-################
+##################
 # Container engine
-################
+##################
+
+# Valid options are [ docker, podman ]
+#kolla_container_engine: docker
 
-# Valid options are [ docker ]
-# kolla_container_engine: docker
 
 ################
 # Docker options
diff --git a/releasenotes/notes/feature-podman-support-c4fa7c367b4ceac4.yaml b/releasenotes/notes/feature-podman-support-c4fa7c367b4ceac4.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1bf410897363d3943cc55b90fbbe87b063338ca5
--- /dev/null
+++ b/releasenotes/notes/feature-podman-support-c4fa7c367b4ceac4.yaml
@@ -0,0 +1,7 @@
+---
+features:
+  - |
+    Implements support for Podman deployment as an alternative to Docker.
+    To deploy using Podman, set the variable
+    ``kolla_container_engine`` to ``podman``
+    in the ``globals.yml`` file.
diff --git a/tests/kolla-toolbox-testsuite.yml b/tests/kolla-toolbox-testsuite.yml
new file mode 100644
index 0000000000000000000000000000000000000000..23a3fb08fd4a9166523c60c5297ec10657f86d86
--- /dev/null
+++ b/tests/kolla-toolbox-testsuite.yml
@@ -0,0 +1,91 @@
+---
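+# NOTE: these tasks expect ``item`` to hold the container engine name
+# (docker or podman), typically supplied by the including play's loop.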
+- name: Test successful & unchanged
+  kolla_toolbox:
+    common_options:
+      container_engine: "{{ item }}"
+    module_name: debug
+    module_args:
+      msg: hi
+  register: result
+
+- name: Assert result is successful
+  assert:
+    that: result is successful
+
+- name: Assert result is not changed
+  assert:
+    that: result is not changed
+
+- name: Test successful & changed
+  kolla_toolbox:
+    common_options:
+      container_engine: "{{ item }}"
+    module_name: command
+    module_args:
+      echo hi
+  register: result
+
+- name: Assert result is successful
+  assert:
+    that: result is successful
+
+- name: Assert result is changed
+  assert:
+    that: result is changed
+
+- name: Test unsuccessful
+  kolla_toolbox:
+    common_options:
+      container_engine: "{{ item }}"
+    module_name: command
+    module_args:
+      foo
+  register: result
+  ignore_errors: true
+
+- name: Assert result is failed
+  assert:
+    that: result is failed
+
+- name: Test invalid module parameters
+  kolla_toolbox:
+    common_options:
+      container_engine: "{{ item }}"
+    module_name: debug
+    module_args:
+      foo: bar
+  register: result
+  ignore_errors: true
+
+- name: Assert result is failed
+  assert:
+    that: result is failed
+
+- name: Setup for Test successful & changed (JSON format)
+  kolla_toolbox:
+    common_options:
+      container_engine: "{{ item }}"
+    module_name: file
+    module_args:
+      path: /tmp/foo
+      state: absent
+
+- name: Test successful & changed (JSON format)
+  kolla_toolbox:
+    common_options:
+      container_engine: "{{ item }}"
+    module_name: file
+    module_args:
+      path: /tmp/foo
+      state: directory
+  register: result
+
+- name: Assert result is successful
+  assert:
+    that: result is successful
+
+- name: Assert result is changed
+  assert:
+    that: result is changed
diff --git a/tests/kolla_docker_tests/test_docker_worker.py b/tests/kolla_docker_tests/test_docker_worker.py
index d85c2255d860bf391c6425de77bc88610f018aec..1916aa79cb075853f5c5d25e22f38f1db22999aa 100644
--- a/tests/kolla_docker_tests/test_docker_worker.py
+++ b/tests/kolla_docker_tests/test_docker_worker.py
@@ -39,7 +39,7 @@ dwm = imp.load_source('kolla_docker_worker', docker_worker_file)
 FAKE_DATA = {
 
     'params': {
-        'common_options': None,
+        'container_engine': 'docker',
         'api_version': None,
         'auth_username': None,
         'auth_password': None,
@@ -224,19 +224,21 @@ class TestMainModule(base.BaseTestCase):
         module_mock.fail_json.assert_called_once_with(
             changed=True, msg=repr("Some very ugly traceback"))
 
-    @mock.patch("kolla_docker.DockerWorker")
     @mock.patch("kolla_docker.generate_module")
-    def test_execute_module(self, mock_generate_module, mock_dw):
-        mock_dw.return_value.check_image.return_value = False
-        mock_dw.return_value.changed = False
-        mock_dw.return_value.result = {"some_key": "some_value"}
+    def test_execute_module(self, mock_generate_module):
         module_mock = mock.MagicMock()
         module_mock.params = self.fake_data['params']
         module_mock.params["action"] = "check_image"
         mock_generate_module.return_value = module_mock
-        kd.main()
-        mock_dw.assert_called_once_with(module_mock)
-        mock_dw.return_value.check_image.assert_called_once_with()
+        with mock.patch(
+            "ansible.module_utils.kolla_docker_worker.DockerWorker"
+        ) as mock_dw:
+            mock_dw.return_value.check_image.return_value = False
+            mock_dw.return_value.changed = False
+            mock_dw.return_value.result = {"some_key": "some_value"}
+            kd.main()
+            mock_dw.assert_called_once_with(module_mock)
+            mock_dw.return_value.check_image.assert_called_once_with()
         module_mock.exit_json.assert_called_once_with(changed=False,
                                                       result=False,
                                                       some_key="some_value")
diff --git a/tests/kolla_docker_tests/test_podman_worker.py b/tests/kolla_docker_tests/test_podman_worker.py
new file mode 100644
index 0000000000000000000000000000000000000000..5258f05ebcb70ebbe6617b73e0e811428cd7ea48
--- /dev/null
+++ b/tests/kolla_docker_tests/test_podman_worker.py
@@ -0,0 +1,1657 @@
+#!/usr/bin/env python
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import copy
+import imp
+import os
+import sys
+import unittest
+from unittest import mock
+
+from oslotest import base
+from podman import errors as podman_error
+
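+# Stub out dbus before loading the workers so that the import chain
+# (systemd integration) does not require real D-Bus bindings on the test host.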
+sys.modules['dbus'] = mock.MagicMock()
+
+this_dir = os.path.dirname(sys.modules[__name__].__file__)
+ansible_dir = os.path.join(this_dir, '..', '..', 'ansible')
+kolla_docker_file = os.path.join(ansible_dir,
+                                 'library', 'kolla_docker.py')
+podman_worker_file = os.path.join(ansible_dir,
+                                  'module_utils', 'kolla_podman_worker.py')
+kd = imp.load_source('kolla_docker', kolla_docker_file)
+pwm = imp.load_source('kolla_podman_worker', podman_worker_file)
+
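+# Fixture data modelled on podman-py attributes; the image and container
+# dicts below are wrapped into mocks by the construct_* helpers further down.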
+FAKE_DATA = {
+    'params': {
+        'container_engine': 'podman',
+        'command': None,
+        'detach': True,
+        'environment': {},
+        'host_config': {
+            'network_mode': 'host',
+            'ipc_mode': '',
+            'cap_add': None,
+            'security_opt': None,
+            'pid_mode': '',
+            'privileged': False,
+            'tmpfs': None,
+            'volumes_from': None,
+            'restart_policy': 'unless-stopped',
+            'restart_retries': 10},
+        'labels': {'build-date': '2016-06-02',
+                   'kolla_version': '2.0.1',
+                   'license': 'GPLv2',
+                   'name': 'ubuntu Base Image',
+                   'vendor': 'ubuntuOS'},
+        'image': 'myregistrydomain.com:5000/ubuntu:16.04',
+        'name': 'test_container',
+        'remove_on_exit': True,
+        'volumes': None,
+        'tty': False,
+        'client_timeout': 120,
+    },
+
+    'images': [
+        {'Created': '2022-04-08T02:20:56.825403178Z',
+         'Labels': {},
+         'VirtualSize': 120759015,
+         'Parent': '',
+         'RepoTags': ['myregistrydomain.com:5000/ubuntu:16.04'],
+         'Id': '7528a4009573fa8c5dbf4b6f5fad9f5b8d3a0fb90e22bb1b217211b553eb22cf',      # noqa: E501
+         'Size': 120759015},
+        {'Created': '2022-04-08T02:22:00.695203378Z',
+         'Labels': {},
+         'VirtualSize': 403096303,
+         'Parent': '',
+         'RepoTags': ['myregistrydomain.com:5000/centos:7.0'],
+         'Id': '15529c81ae4a83084b076a16bc314e1af0b040a937f585311c87863fecc623a3',      # noqa: E501
+         'Size': 403096303}
+    ],
+
+    'containers': [
+        {'Created': '2022-06-23T14:30:35.595194629Z',
+         'State': {'Status': 'running'},
+         'HostConfig': {'NetworkMode': 'host'},
+         'Id': '1663dfafec3bb59386e4a024416c8b0a872ae0984c9806322751d14b9f794c56',      # noqa: E501
+         'ImageName': 'myregistrydomain.com:5000/ubuntu:16.04',
+         'Image': '7528a4009573fa8c5dbf4b6f5fad9f5b8d3a0fb90e22bb1b217211b553eb22cf',   # noqa: E501
+         'Labels': {},
+         'Name': 'my_container'},
+        {'Created': '2022-06-23T14:32:13.17545575Z',
+         'State': {'Status': 'exited'},
+         'HostConfig': {'NetworkMode': 'host'},
+         'Id': '9404fc5f90118ddbbc31bb4c9462ad06aa7163eac1bc6d74c3e978143f10cc0c',      # noqa: E501
+         'ImageName': 'myregistrydomain.com:5000/ubuntu:16.04',
+         'Image': '15529c81ae4a83084b076a16bc314e1af0b040a937f585311c87863fecc623a3',   # noqa: E501
+         'Labels': {},
+         'Name': 'exited_container'},
+    ],
+
+    'container_inspect': {
+        'Config': {
+            'Env': ['KOLLA_BASE_DISTRO=ubuntu',
+                    'KOLLA_INSTALL_TYPE=binary',
+                    'KOLLA_INSTALL_METATYPE=rdo'],
+            'Hostname': 'node2',
+            'Volumes': {'/var/lib/kolla/config_files/': {}}},
+        'Mounts': {},
+        'NetworkSettings': {}
+    }
+
+}
+
+
+class APIErrorStub(Exception):
+    pass
+
+
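+# Build a PodmanWorker with a mocked Ansible module, systemd helper and
+# podman client (pw.pc), so tests run without a real Podman service.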
+def get_PodmanWorker(mod_param):
+    module = mock.MagicMock()
+    module.params = mod_param
+    pw = pwm.PodmanWorker(module)
+    pw.systemd = mock.MagicMock()
+    pw.pc = mock.MagicMock()
+    return pw
+
+
+def construct_image(img_dict):
+    image = mock.Mock()
+    image.attrs = img_dict
+    return image
+
+
+def construct_volume(vol_dict):
+    volume = mock.Mock()
+    volume.attrs = vol_dict
+    return volume
+
+
+def construct_container(cont_dict):
+    container = mock.Mock()
+    container.name = cont_dict['Name']
+    container.attrs = copy.deepcopy(cont_dict)
+    container.status = cont_dict['State']['Status']
+    return container
+
+
+def get_containers(override=None):
+    if override:
+        cont_dicts = override
+    else:
+        cont_dicts = copy.deepcopy(FAKE_DATA['containers'])
+
+    containers = []
+    for c in cont_dicts:
+        containers.append(construct_container(c))
+
+    return containers
+
+
+class TestMainModule(base.BaseTestCase):
+    def setUp(self):
+        super(TestMainModule, self).setUp()
+        self.fake_data = copy.deepcopy(FAKE_DATA)
+
+    @mock.patch("kolla_docker.generate_module")
+    def test_execute_module(self, mock_generate_module):
+        module_mock = mock.MagicMock()
+        module_mock.params = self.fake_data['params']
+        module_mock.params["action"] = "check_image"
+        mock_generate_module.return_value = module_mock
+        with mock.patch(
+            "ansible.module_utils.kolla_podman_worker.PodmanWorker"
+        ) as mock_pw:
+            mock_pw.return_value.check_image.return_value = False
+            mock_pw.return_value.changed = False
+            mock_pw.return_value.result = {"some_key": "some_value"}
+            kd.main()
+            mock_pw.assert_called_once_with(module_mock)
+            mock_pw.return_value.check_image.assert_called_once_with()
+        module_mock.exit_json.assert_called_once_with(changed=False,
+                                                      result=False,
+                                                      some_key="some_value")
+
+
+class TestContainer(base.BaseTestCase):
+    def setUp(self):
+        super(TestContainer, self).setUp()
+        self.fake_data = copy.deepcopy(FAKE_DATA)
+
+    def test_create_container_fail(self):
+        self.pw = get_PodmanWorker(self.fake_data['params'])
+        container = mock.Mock()
+        container.attrs = {}
+        container.to_dict = mock.Mock(return_value={'some': 'value'})
+        self.pw.pc.containers.create = mock.Mock(return_value=container)
+
+        self.pw.create_container()
+        self.assertFalse(self.pw.changed)
+        self.pw.pc.containers.create.assert_called_once()
+        self.pw.module.fail_json.assert_called_once_with(
+            failed=True, msg='Creation failed', some='value')
+        self.pw.systemd.create_unit_file.assert_not_called()
+
+    def test_create_container_without_dimensions(self):
+        self.pw = get_PodmanWorker(self.fake_data['params'])
+        self.pw.prepare_container_args = mock.Mock(
+            return_value={'some': 'value'})
+        self.pw.systemd.create_unit_file = mock.Mock(return_value=True)
+
+        self.pw.create_container()
+        self.assertTrue(self.pw.changed)
+
+    def test_create_container_with_dimensions(self):
+        self.fake_data['params']['dimensions'] = {'blkio_weight': 10}
+        self.pw = get_PodmanWorker(self.fake_data['params'].copy())
+        self.pw.pc.containers.create = mock.MagicMock()
+
+        self.pw.create_container()
+        self.assertTrue(self.pw.changed)
+        podman_create_kwargs = self.pw.pc.containers.create.call_args.kwargs.items()    # noqa
+        self.pw.pc.containers.create.assert_called_once()
+        self.assertIn(('blkio_weight', 10), podman_create_kwargs)
+
+    def test_create_container_wrong_dimensions(self):
+        self.fake_data['params']['dimensions'] = {'random': 10}
+        self.pw = get_PodmanWorker(self.fake_data['params'])
+
+        self.pw.create_container()
+        self.pw.module.exit_json.assert_called_once_with(
+            failed=True, msg=repr("Unsupported dimensions"),
+            unsupported_dimensions=set(['random']))
+
+    def test_create_container_with_healthcheck(self):
+        hc = {'test': ['CMD-SHELL', '/bin/check.sh']}
+        self.fake_data['params']['healthcheck'] = hc
+        self.pw = get_PodmanWorker(self.fake_data['params'].copy())
+
+        self.pw.create_container()
+        self.assertTrue(self.pw.changed)
+        podman_create_kwargs = self.pw.pc.containers.create.call_args.kwargs
+        hc_call = podman_create_kwargs.get('healthcheck', None)
+        self.pw.pc.containers.create.assert_called_once()
+        self.assertIsNotNone(hc_call)
+        self.assertEqual(hc, hc_call)
+
+    @unittest.skip("Skipping because tmpfs is currently"
+                   " not supported by podman API.")
+    def test_create_container_with_tmpfs(self):
+        self.fake_data['params']['tmpfs'] = ['/tmp']  # nosec: B108
+        self.pw = get_PodmanWorker(self.fake_data['params'].copy())
+
+        self.pw.create_container()
+        self.assertTrue(self.pw.changed)
+        self.assertEqual(['/tmp'],  # nosec: B108
+                         self.pw.pc.containers.create.call_args[1]['tmpfs'])
+
+    @unittest.skip("Skipping because tmpfs is currently"
+                   " not supported by podman API.")
+    def test_create_container_with_tmpfs_empty_string(self):
+        self.fake_data['params']['tmpfs'] = ['']
+        self.pw = get_PodmanWorker(self.fake_data['params'].copy())
+
+        self.pw.create_container()
+        self.assertTrue(self.pw.changed)
+        self.assertFalse(self.pw.pc.containers.create.call_args[1]['tmpfs'])
+
+    def test_start_container_without_pull(self):
+        self.fake_data['params'].update({'auth_username': 'fake_user',
+                                         'auth_password': 'fake_psw',
+                                         'auth_registry': 'myrepo/myapp',
+                                         'auth_email': 'fake_mail@foogle.com'})
+        self.pw = get_PodmanWorker(self.fake_data['params'].copy())
+        self.pw.pc.images = mock.MagicMock(
+            return_value=self.fake_data['images'])
+        self.pw.pc.containers.list = mock.MagicMock(params={'all': 'True'})
+
+        containers = get_containers()
+        new_container = mock.Mock()
+        new_container.name = 'test_container'
+        new_container.status = 'running'
+        self.pw.pc.containers.list.side_effect = [containers,
+                                                  [*containers, new_container]]
+        self.pw.check_container_differs = mock.MagicMock(return_value=False)
+        self.pw.create_container = mock.MagicMock()
+
+        self.pw.start_container()
+        self.assertFalse(self.pw.changed)
+        self.pw.create_container.assert_called_once_with()
+
+    def test_start_container_with_duplicate_name(self):
+        self.fake_data['params'].update({'name': 'my_container',
+                                         'auth_username': 'fake_user',
+                                         'auth_password': 'fake_psw',
+                                         'auth_registry': 'myrepo/myapp',
+                                         'auth_email': 'fake_mail@foogle.com'})
+        self.pw = get_PodmanWorker(self.fake_data['params'])
+        self.pw.pc.images = mock.MagicMock(
+            return_value=self.fake_data['images'])
+        self.pw.pc.containers.list = mock.MagicMock(params={'all': 'True'})
+        full_cont_list = get_containers()
+        updated_cont_list = full_cont_list[1:]
+        self.pw.pc.containers.list.side_effect = [
+            full_cont_list,
+            full_cont_list,
+            full_cont_list,
+            updated_cont_list,
+            full_cont_list
+        ]
+        self.pw.check_container_differs = mock.MagicMock(return_value=True)
+        self.pw.create_container = mock.MagicMock()
+        self.pw.start_container()
+        self.assertTrue(self.pw.changed)
+        full_cont_list[0].remove.assert_called_once_with(force=True)
+        self.pw.create_container.assert_called_once_with()
+
+    def test_start_container(self):
+        self.fake_data['params'].update({'name': 'my_container',
+                                         'auth_username': 'fake_user',
+                                         'auth_password': 'fake_psw',
+                                         'auth_registry': 'myrepo/myapp',
+                                         'auth_email': 'fake_mail@foogle.com'})
+        self.pw = get_PodmanWorker(self.fake_data['params'])
+        self.pw.pc.images = mock.MagicMock(
+            return_value=self.fake_data['images'])
+        self.fake_data['containers'][0].update(
+            {'State': {'Status': 'exited'}})
+        self.pw.pc.containers.list = mock.MagicMock(
+            return_value=get_containers(self.fake_data['containers']))
+        self.pw.check_container_differs = mock.MagicMock(return_value=False)
+        container = mock.Mock()
+        self.pw.check_container = mock.Mock(return_value=container)
+
+        self.pw.start_container()
+        self.assertTrue(self.pw.changed)
+        container.start.assert_not_called()
+        self.pw.systemd.start.assert_called_once()
+
+    def test_start_container_no_detach(self):
+        self.fake_data['params'].update({'name': 'my_container',
+                                         'detach': False})
+        self.pw = get_PodmanWorker(self.fake_data['params'])
+        full_cont_list = get_containers(self.fake_data['containers'])
+        my_container = full_cont_list[0]
+
+        self.pw.pc.images = mock.MagicMock(
+            return_value=self.fake_data['images'])
+        self.pw.pc.containers.list = mock.MagicMock(side_effect=[
+            [],
+            full_cont_list,
+            full_cont_list,
+            full_cont_list,
+            full_cont_list
+        ])
+        my_container.remove = mock.Mock()
+        my_container.wait = mock.MagicMock(return_value=0)
+        my_container.logs = mock.MagicMock(side_effect=[
+            ['fake stdout'.encode()],
+            ['fake stderr'.encode()]])
+
+        self.pw.start_container()
+        self.assertTrue(self.pw.changed)
+        my_container.wait.assert_called_once_with()
+        my_container.logs.assert_has_calls([
+            mock.call(stdout=True, stderr=False),
+            mock.call(stdout=False, stderr=True)])
+        self.pw.systemd.stop.assert_called_once_with()
+        my_container.remove.assert_called_once_with(force=True)
+        expected = {'rc': 0, 'stdout': 'fake stdout', 'stderr': 'fake stderr'}
+        self.assertEqual(expected, self.pw.result)
+
+    def test_start_container_no_systemd(self):
+        self.fake_data['params'].update({'name': 'my_container',
+                                         'restart_policy': 'no',
+                                         'auth_username': 'fake_user',
+                                         'auth_password': 'fake_psw',
+                                         'auth_registry': 'myrepo/myapp',
+                                         'auth_email': 'fake_mail@foogle.com'})
+        self.pw = get_PodmanWorker(self.fake_data['params'])
+        self.pw.pc.images = mock.MagicMock(
+            return_value=self.fake_data['images'])
+        self.fake_data['containers'][0].update(
+            {'State': {'Status': 'exited'}})
+        self.pw.pc.containers.list = mock.MagicMock(
+            return_value=get_containers(self.fake_data['containers']))
+        self.pw.check_container_differs = mock.MagicMock(return_value=False)
+        container = mock.Mock()
+        self.pw.check_container = mock.Mock(return_value=container)
+
+        self.pw.start_container()
+        self.assertTrue(self.pw.changed)
+        container.start.assert_called_once()
+        self.pw.systemd.start.assert_not_called()
+
+    def test_start_container_systemd_start_fail(self):
+        self.fake_data['params'].update({'name': 'my_container',
+                                         'auth_username': 'fake_user',
+                                         'auth_password': 'fake_psw',
+                                         'auth_registry': 'myrepo/myapp',
+                                         'auth_email': 'fake_mail@foogle.com'})
+        self.pw = get_PodmanWorker(self.fake_data['params'])
+        self.pw.pc.images = mock.MagicMock(
+            return_value=self.fake_data['images'])
+        self.fake_data['containers'][0].update(
+            {'State': {'Status': 'exited'}})
+        self.pw.pc.containers.list = mock.MagicMock(
+            return_value=get_containers(self.fake_data['containers']))
+        self.pw.check_container_differs = mock.MagicMock(return_value=False)
+        container = mock.Mock()
+        container.attrs = {'some': 'value'}
+        self.pw.check_container = mock.Mock(return_value=container)
+        self.pw.systemd.start = mock.Mock(return_value=False)
+
+        self.pw.start_container()
+        self.assertTrue(self.pw.changed)
+        container.start.assert_not_called()
+        self.pw.systemd.start.assert_called_once()
+        self.pw.module.fail_json.assert_called_once_with(
+            changed=True, msg='Container timed out', some='value')
+
+    def test_stop_container(self):
+        self.pw = get_PodmanWorker({'name': 'my_container',
+                                    'action': 'stop_container'})
+        full_cont_list = get_containers(self.fake_data['containers'])
+        container = full_cont_list[0]
+        self.pw.pc.containers.list.return_value = full_cont_list
+        self.pw.stop_container()
+
+        self.assertTrue(self.pw.changed)
+        self.pw.pc.containers.list.assert_called_once_with(all=True)
+        self.pw.systemd.stop.assert_called_once()
+        container.stop.assert_not_called()
+        self.pw.module.fail_json.assert_not_called()
+
+    def test_stop_container_no_systemd(self):
+        self.pw = get_PodmanWorker({'name': 'my_container',
+                                    'action': 'stop_container',
+                                    'restart_policy': 'no'})
+        full_cont_list = get_containers(self.fake_data['containers'])
+        container = full_cont_list[0]
+        self.pw.pc.containers.list.return_value = full_cont_list
+        self.pw.stop_container()
+
+        self.assertTrue(self.pw.changed)
+        self.pw.pc.containers.list.assert_called_once_with(all=True)
+        self.pw.systemd.stop.assert_not_called()
+        container.stop.assert_called_once()
+        self.pw.module.fail_json.assert_not_called()
+
+    def test_stop_container_already_stopped(self):
+        self.pw = get_PodmanWorker({'name': 'exited_container',
+                                    'action': 'stop_container'})
+        full_cont_list = get_containers(self.fake_data['containers'])
+        self.pw.pc.containers.list.return_value = full_cont_list
+        exited_container = full_cont_list[1]
+        self.pw.stop_container()
+
+        self.assertFalse(self.pw.changed)
+        self.pw.pc.containers.list.assert_called_once_with(all=True)
+        self.pw.module.fail_json.assert_not_called()
+        exited_container.stop.assert_not_called()
+
+    def test_stop_container_not_exists(self):
+        self.pw = get_PodmanWorker({'name': 'fake_container',
+                                    'action': 'stop_container'})
+        full_cont_list = get_containers(self.fake_data['containers'])
+        self.pw.pc.containers.list.return_value = full_cont_list
+        self.pw.stop_container()
+
+        self.assertFalse(self.pw.changed)
+        self.pw.pc.containers.list.assert_called_once_with(all=True)
+        for cont in full_cont_list:
+            cont.stop.assert_not_called()
+        self.pw.systemd.stop.assert_not_called()
+        self.pw.module.fail_json.assert_called_once_with(
+            msg="No such container: fake_container to stop")
+
+    def test_stop_container_not_exists_ignore_missing(self):
+        self.pw = get_PodmanWorker({'name': 'fake_container',
+                                    'action': 'stop_container',
+                                    'ignore_missing': True})
+        full_cont_list = get_containers(self.fake_data['containers'])
+        self.pw.pc.containers.list.return_value = full_cont_list
+        self.pw.stop_container()
+
+        self.assertFalse(self.pw.changed)
+        self.pw.pc.containers.list.assert_called_once_with(all=True)
+        for cont in full_cont_list:
+            cont.stop.assert_not_called()
+        self.pw.systemd.stop.assert_not_called()
+        self.pw.module.fail_json.assert_not_called()
+
+    def test_stop_and_remove_container(self):
+        self.pw = get_PodmanWorker({'name': 'my_container',
+                                    'action': 'stop_and_remove_container'})
+        full_cont_list = get_containers(self.fake_data['containers'])
+        my_container = full_cont_list[0]
+        self.pw.pc.containers.list.side_effect = [
+            full_cont_list,
+            full_cont_list,
+            full_cont_list
+        ]
+        self.pw.stop_and_remove_container()
+
+        self.assertTrue(self.pw.changed)
+        self.pw.pc.containers.list.assert_called_with(all=True)
+        self.pw.systemd.stop.assert_called_once()
+        my_container.remove.assert_called_once_with(force=True)
+
+    def test_stop_and_remove_container_not_exists(self):
+        self.pw = get_PodmanWorker({'name': 'fake_container',
+                                    'action': 'stop_and_remove_container'})
+        full_cont_list = get_containers(self.fake_data['containers'])
+        self.pw.pc.containers.list.return_value = full_cont_list
+        self.pw.stop_and_remove_container()
+
+        self.assertFalse(self.pw.changed)
+        self.pw.pc.containers.list.assert_called_with(all=True)
+        self.assertFalse(self.pw.systemd.stop.called)
+        for cont in full_cont_list:
+            self.assertFalse(cont.remove.called)
+
+    def test_restart_container(self):
+        self.pw = get_PodmanWorker({'name': 'my_container',
+                                    'action': 'restart_container'})
+        self.pw.pc.containers.list.return_value = get_containers(
+            self.fake_data['containers'])
+        self.pw.restart_container()
+
+        self.assertTrue(self.pw.changed)
+        self.pw.pc.containers.list.assert_called_once_with(all=True)
+        self.pw.systemd.restart.assert_called_once_with()
+
+    def test_restart_container_not_exists(self):
+        self.pw = get_PodmanWorker({'name': 'fake-container',
+                                    'action': 'restart_container'})
+        self.pw.pc.containers.list.return_value = get_containers(
+            self.fake_data['containers'])
+        self.pw.restart_container()
+
+        self.assertFalse(self.pw.changed)
+        self.pw.pc.containers.list.assert_called_once_with(all=True)
+        self.pw.module.fail_json.assert_called_once_with(
+            msg="No such container: fake-container")
+
+    def test_restart_systemd_timeout(self):
+        self.pw = get_PodmanWorker({'name': 'my_container',
+                                    'action': 'restart_container'})
+        full_cont_list = get_containers(self.fake_data['containers'])
+        my_container = full_cont_list[0]
+        self.pw.pc.containers.list.return_value = full_cont_list
+        self.pw.systemd.restart = mock.Mock(return_value=False)
+        self.pw.restart_container()
+
+        self.assertTrue(self.pw.changed)
+        self.pw.pc.containers.list.assert_called_once_with(all=True)
+        self.pw.systemd.restart.assert_called_once_with()
+        self.pw.module.fail_json.assert_called_once_with(
+            changed=True, msg="Container timed out", **my_container.attrs)
+
+    def test_remove_container(self):
+        self.pw = get_PodmanWorker({'name': 'my_container',
+                                    'action': 'remove_container'})
+        full_cont_list = get_containers(self.fake_data['containers'])
+        self.pw.pc.containers.list.return_value = full_cont_list
+        my_container = full_cont_list[0]
+        self.pw.remove_container()
+
+        self.assertTrue(self.pw.changed)
+        self.pw.pc.containers.list.assert_called_once_with(all=True)
+        my_container.remove.assert_called_once_with(force=True)
+
+    def test_remove_container_api_error(self):
+        self.pw = get_PodmanWorker({'name': 'my_container',
+                                    'action': 'remove_container'})
+        my_container = construct_container(self.fake_data['containers'][0])
+        my_container.remove.side_effect = podman_error.APIError("dummy error")
+        self.pw.check_container = mock.Mock(return_value=my_container)
+
+        self.assertRaises(
+            podman_error.APIError,
+            self.pw.remove_container)
+        self.assertTrue(self.pw.changed)
+        my_container.remove.assert_called_once_with(force=True)
+
+    def test_get_container_env(self):
+        fake_env = dict(KOLLA_BASE_DISTRO='ubuntu',
+                        KOLLA_INSTALL_TYPE='binary',
+                        KOLLA_INSTALL_METATYPE='rdo')
+        self.pw = get_PodmanWorker({'name': 'my_container',
+                                    'action': 'get_container_env'})
+        self.fake_data['containers'][0].update(
+            self.fake_data['container_inspect'])
+        full_cont_list = get_containers(self.fake_data['containers'])
+        self.pw.pc.containers.list.return_value = full_cont_list
+        self.pw.get_container_env()
+
+        self.assertFalse(self.pw.changed)
+        self.pw.pc.containers.list.assert_called_once_with(all=True)
+        self.pw.module.exit_json.assert_called_once_with(**fake_env)
+
+    def test_get_container_env_negative(self):
+        self.pw = get_PodmanWorker({'name': 'fake_container',
+                                    'action': 'get_container_env'})
+        self.pw.pc.containers.list.return_value = get_containers(
+            self.fake_data['containers'])
+        self.pw.get_container_env()
+
+        self.assertFalse(self.pw.changed)
+        self.pw.module.fail_json.assert_called_once_with(
+            msg="No such container: fake_container")
+
+    def test_get_container_state(self):
+        State = {'Dead': False,
+                 'ExitCode': 0,
+                 'Pid': 12475,
+                 'StartedAt': '2016-06-07T11:22:37.66876269Z',
+                 'Status': 'running'}
+        self.fake_data['container_inspect'].update({'State': State})
+        self.pw = get_PodmanWorker({'name': 'my_container',
+                                    'action': 'get_container_state'})
+        self.fake_data['containers'][0].update({'State': State})
+        self.pw.pc.containers.list.return_value = get_containers(
+            self.fake_data['containers'])
+        self.pw.get_container_state()
+
+        self.assertFalse(self.pw.changed)
+        self.pw.pc.containers.list.assert_called_once_with(all=True)
+        self.pw.module.exit_json.assert_called_once_with(**State)
+
+    def test_get_container_state_negative(self):
+        self.pw = get_PodmanWorker({'name': 'fake_container',
+                                    'action': 'get_container_state'})
+        self.pw.pc.containers.list.return_value = get_containers(
+            self.fake_data['containers'])
+        self.pw.get_container_state()
+
+        self.assertFalse(self.pw.changed)
+        self.pw.pc.containers.list.assert_called_once_with(all=True)
+        self.pw.module.fail_json.assert_called_once_with(
+            msg="No such container: fake_container")
+
+    def test_recreate_or_restart_container_not_container(self):
+        self.pw = get_PodmanWorker({
+            'environment': dict(KOLLA_CONFIG_STRATEGY='COPY_ALWAYS')})
+        self.pw.check_container = mock.Mock(return_value=None)
+        self.pw.start_container = mock.Mock()
+
+        self.pw.recreate_or_restart_container()
+
+        self.pw.start_container.assert_called_once_with()
+
+    def test_recreate_or_restart_container_container_copy_always(self):
+        self.pw = get_PodmanWorker({
+            'environment': dict(KOLLA_CONFIG_STRATEGY='COPY_ALWAYS')})
+        self.pw.check_container = mock.Mock(
+            return_value=construct_container(self.fake_data['containers'][0]))
+        self.pw.restart_container = mock.Mock()
+        self.pw.check_container_differs = mock.Mock(return_value=False)
+
+        self.pw.recreate_or_restart_container()
+
+        self.pw.restart_container.assert_called_once_with()
+
+    def test_recreate_or_restart_container_container_copy_always_differs(self):
+        self.pw = get_PodmanWorker({
+            'environment': dict(KOLLA_CONFIG_STRATEGY='COPY_ALWAYS')})
+        self.pw.check_container = mock.Mock(
+            return_value=construct_container(self.fake_data['containers'][0]))
+        self.pw.ensure_image = mock.Mock()
+        self.pw.start_container = mock.Mock()
+        self.pw.remove_container = mock.Mock()
+        self.pw.check_container_differs = mock.Mock(return_value=True)
+
+        self.pw.recreate_or_restart_container()
+
+        self.pw.ensure_image.assert_called_once_with()
+        self.pw.remove_container.assert_called_once_with()
+        self.pw.start_container.assert_called_once_with()
+
+    def test_recreate_or_restart_container_container_copy_once(self):
+        self.pw = get_PodmanWorker({
+            'environment': dict(KOLLA_CONFIG_STRATEGY='COPY_ONCE')})
+        self.pw.check_container = mock.Mock(
+            return_value=construct_container(self.fake_data['containers'][0]))
+        self.pw.ensure_image = mock.Mock()
+        self.pw.start_container = mock.Mock()
+        self.pw.remove_container = mock.Mock()
+
+        self.pw.recreate_or_restart_container()
+
+        self.pw.ensure_image.assert_called_once_with()
+        self.pw.remove_container.assert_called_once_with()
+        self.pw.start_container.assert_called_once_with()
+
+    def test_recreate_or_restart_container_pull_before_stop(self):
+        # Testing fix for https://launchpad.net/bugs/1852572.
+        self.pw = get_PodmanWorker({
+            'environment': dict(KOLLA_CONFIG_STRATEGY='COPY_ONCE')})
+        self.pw.check_container = mock.Mock(
+            return_value=construct_container(self.fake_data['containers'][0]))
+        self.pw.check_image = mock.Mock(return_value=None)
+        self.pw.pull_image = mock.Mock()
+        self.pw.start_container = mock.Mock()
+        self.pw.remove_container = mock.Mock()
+
+        self.pw.recreate_or_restart_container()
+
+        self.pw.check_image.assert_called_once_with()
+        self.pw.pull_image.assert_called_once_with()
+        self.pw.remove_container.assert_called_once_with()
+        self.pw.start_container.assert_called_once_with()
+
+
+class TestImage(base.BaseTestCase):
+    def setUp(self):
+        super(TestImage, self).setUp()
+        self.fake_data = copy.deepcopy(FAKE_DATA)
+
+    def test_check_image(self):
+        self.pw = get_PodmanWorker(
+            {'image': 'myregistrydomain.com:5000/ubuntu:16.04'})
+        self.pw.pc.images.get.return_value = construct_image(
+            self.fake_data['images'][0])
+
+        return_data = self.pw.check_image()
+        self.assertFalse(self.pw.changed)
+        self.pw.pc.images.get.assert_called_once_with(
+            'myregistrydomain.com:5000/ubuntu:16.04')
+        self.assertEqual(self.fake_data['images'][0], return_data)
+
+    def test_compare_image(self):
+        self.pw = get_PodmanWorker(
+            {'image': 'myregistrydomain.com:5000/ubuntu:16.04'})
+        self.pw.pc.images.return_value = construct_image(
+            self.fake_data['images'][0])
+        container_info = {'Image': 'sha256:c5f1cf40',
+                          'Config': {'myregistrydomain.com:5000/ubuntu:16.04'}
+                          }
+
+        return_data = self.pw.compare_image(container_info)
+        self.assertFalse(self.pw.changed)
+        self.pw.pc.images.get.assert_called_once_with(
+            'myregistrydomain.com:5000/ubuntu:16.04')
+        self.assertTrue(return_data)
+
+    def test_compare_config_unchanged(self):
+        self.fake_data['params']['name'] = 'my_container'
+        self.pw = get_PodmanWorker(self.fake_data['params'])
+        my_container = construct_container(self.fake_data['containers'][0])
+        my_container.exec_run = mock.Mock(
+            return_value=(0, 'fake_data'.encode()))
+        self.pw.pc.containers.get.return_value = my_container
+
+        return_data = self.pw.compare_config()
+        self.pw.pc.containers.get.assert_called_once_with(
+            self.fake_data['params']['name'])
+        my_container.exec_run.assert_called_once_with(
+            pwm.COMPARE_CONFIG_CMD,
+            user='root')
+        self.assertFalse(return_data)
+
+    def test_compare_config_changed(self):
+        self.fake_data['params']['name'] = 'my_container'
+        self.pw = get_PodmanWorker(self.fake_data['params'])
+        my_container = construct_container(self.fake_data['containers'][0])
+        my_container.exec_run = mock.Mock(
+            return_value=(1, 'fake_data'.encode()))
+        self.pw.pc.containers.get.return_value = my_container
+
+        return_data = self.pw.compare_config()
+        self.pw.pc.containers.get.assert_called_once_with(
+            self.fake_data['params']['name'])
+        my_container.exec_run.assert_called_once_with(
+            pwm.COMPARE_CONFIG_CMD,
+            user='root')
+        self.assertTrue(return_data)
+
+    def test_compare_config_changed_container_exited(self):
+        self.fake_data['params']['name'] = 'my_container'
+        self.pw = get_PodmanWorker(self.fake_data['params'])
+        my_container = construct_container(self.fake_data['containers'][0])
+        my_container.status = 'exited'
+        self.pw.pc.containers.get.return_value = my_container
+
+        return_data = self.pw.compare_config()
+        self.pw.pc.containers.get.assert_called_once_with(
+            self.fake_data['params']['name'])
+        my_container.exec_run.assert_not_called()
+        self.assertTrue(return_data)
+
+    @mock.patch('kolla_podman_worker.APIError',
+                new_callable=lambda: APIErrorStub)
+    def test_compare_config_changed_client_failure(self, stub_exception):
+        stub_exception.is_client_error = mock.Mock(return_value=True)
+        self.fake_data['params']['name'] = 'my_container'
+        self.pw = get_PodmanWorker(self.fake_data['params'])
+        my_container = construct_container(self.fake_data['containers'][0])
+        my_container.exec_run = mock.Mock(side_effect=stub_exception())
+        self.pw.pc.containers.get.return_value = my_container
+
+        return_data = self.pw.compare_config()
+        self.pw.pc.containers.get.assert_called_once_with(
+            self.fake_data['params']['name'])
+        my_container.exec_run.assert_called_once_with(
+            pwm.COMPARE_CONFIG_CMD,
+            user='root')
+        self.assertTrue(return_data)
+
+    def test_compare_config_error(self):
+        self.fake_data['params']['name'] = 'my_container'
+        self.pw = get_PodmanWorker(self.fake_data['params'])
+        my_container = construct_container(self.fake_data['containers'][0])
+        my_container.exec_run = mock.Mock(
+            return_value=(-1, 'fake_data'.encode()))
+        self.pw.pc.containers.get.return_value = my_container
+
+        self.assertRaises(Exception, self.pw.compare_config)  # noqa: H202
+        self.pw.pc.containers.get.assert_called_once_with(
+            self.fake_data['params']['name'])
+        my_container.exec_run.assert_called_once_with(
+            pwm.COMPARE_CONFIG_CMD,
+            user='root')
+
+    def test_compare_config_error_server_failure(self):
+        self.fake_data['params']['name'] = 'my_container'
+        self.pw = get_PodmanWorker(self.fake_data['params'])
+        my_container = construct_container(self.fake_data['containers'][0])
+        my_container.exec_run = mock.Mock(
+            side_effect=podman_error.APIError("foo"))
+        self.pw.pc.containers.get.return_value = my_container
+
+        self.assertRaises(podman_error.APIError, self.pw.compare_config)
+        self.pw.pc.containers.get.assert_called_once_with(
+            self.fake_data['params']['name'])
+        my_container.exec_run.assert_called_once_with(
+            pwm.COMPARE_CONFIG_CMD,
+            user='root')
+
+    def test_pull_image_new(self):
+        self.pw = get_PodmanWorker(
+            {'image': 'myregistrydomain.com:5000/ubuntu:16.04',
+             'auth_username': 'fake_user',
+             'auth_password': 'fake_psw',
+             'auth_registry': 'myrepo/myapp',
+             'auth_email': 'fake_mail@foogle.com'
+             })
+        self.pw.pc.images.pull.return_value = construct_image(
+            self.fake_data['images'][0])
+        self.pw.pc.images.get.return_value = construct_image({})
+
+        self.pw.pull_image()
+        self.pw.pc.images.pull.assert_called_once_with(
+            repository='myregistrydomain.com:5000/ubuntu',
+            tag='16.04',
+            tls_verify=False,
+            stream=False,
+            auth_config={'username': 'fake_user', 'password': 'fake_psw'}
+        )
+        self.assertTrue(self.pw.changed)
+
+    def test_pull_image_exists(self):
+        self.pw = get_PodmanWorker(
+            {'image': 'myregistrydomain.com:5000/ubuntu:16.04',
+             'auth_username': 'fake_user',
+             'auth_password': 'fake_psw',
+             'auth_registry': 'myrepo/myapp',
+             'auth_email': 'fake_mail@foogle.com'
+             })
+        image = construct_image(self.fake_data['images'][0])
+        self.pw.pc.images.pull.return_value = image
+        self.pw.pc.images.get.return_value = image
+
+        self.pw.pull_image()
+        self.pw.pc.images.pull.assert_called_once_with(
+            repository='myregistrydomain.com:5000/ubuntu',
+            tag='16.04',
+            tls_verify=False,
+            stream=False,
+            auth_config={'username': 'fake_user', 'password': 'fake_psw'}
+        )
+        self.assertFalse(self.pw.changed)
+
+    def test_pull_image_not_exists(self):
+        self.pw = get_PodmanWorker(
+            {'image': 'unknown:16.04'})
+        self.pw.pc.images.pull.return_value = construct_image({})
+        self.pw.check_image = mock.Mock(return_value={})
+
+        self.pw.pull_image()
+        self.pw.pc.images.pull.assert_called_once_with(
+            repository='unknown',
+            tag='16.04',
+            tls_verify=False,
+            stream=False,
+        )
+        self.assertFalse(self.pw.changed)
+        self.pw.module.fail_json.assert_called_once_with(
+            msg="The requested image does not exist: unknown:16.04",
+            failed=True)
+
+    def test_pull_image_error(self):
+        self.pw = get_PodmanWorker(
+            {'image': 'myregistrydomain.com:5000/ubuntu:16.04'})
+        self.pw.pc.images.pull = mock.Mock(
+            side_effect=podman_error.APIError("unexpected error"))
+        self.pw.pc.images.get.return_value = construct_image(
+            self.fake_data['images'][0])
+
+        self.pw.pull_image()
+        self.pw.pc.images.pull.assert_called_once_with(
+            repository='myregistrydomain.com:5000/ubuntu',
+            tag='16.04',
+            tls_verify=False,
+            stream=False
+        )
+        self.assertFalse(self.pw.changed)
+        self.pw.module.fail_json.assert_called_once_with(
+            msg="Unknown error message: unexpected error",
+            failed=True)
+
+    def test_remove_image(self):
+        self.pw = get_PodmanWorker(
+            {'image': 'myregistrydomain.com:5000/ubuntu:16.04',
+             'action': 'remove_image'})
+        image = construct_image(self.fake_data['images'][0])
+        self.pw.pc.images.get.return_value = image
+
+        self.pw.remove_image()
+        self.assertTrue(self.pw.changed)
+        image.remove.assert_called_once()
+
+    def test_remove_image_not_exists(self):
+        self.pw = get_PodmanWorker(
+            {'image': 'myregistrydomain.com:5000/non_existing:16.04',
+             'action': 'remove_image'})
+        self.pw.pc.images.get.return_value = construct_image({})
+
+        self.pw.remove_image()
+        self.assertFalse(self.pw.changed)
+
+    def test_remove_image_exception_409(self):
+        resp = mock.MagicMock()
+        resp.status_code = 409
+        podman_except = podman_error.APIError('test error', resp)
+        self.pw = get_PodmanWorker(
+            {'image': 'myregistrydomain.com:5000/ubuntu:16.04',
+             'action': 'remove_image'})
+        image = construct_image(self.fake_data['images'][0])
+        image.remove = mock.Mock(side_effect=podman_except)
+        self.pw.pc.images.get.return_value = image
+
+        self.assertRaises(podman_error.APIError, self.pw.remove_image)
+        self.assertTrue(self.pw.changed)
+        self.pw.module.fail_json.assert_called_once_with(
+            failed=True,
+            msg=("Image 'myregistrydomain.com:5000/ubuntu:16.04' "
+                 "is currently in-use")
+        )
+
+    def test_remove_image_server_error(self):
+        resp = mock.MagicMock()
+        resp.status_code = 500
+        podman_except = podman_error.APIError('test error', resp)
+        self.pw = get_PodmanWorker(
+            {'image': 'myregistrydomain.com:5000/ubuntu:16.04',
+             'action': 'remove_image'})
+        image = construct_image(self.fake_data['images'][0])
+        image.remove = mock.Mock(side_effect=podman_except)
+        self.pw.pc.images.get.return_value = image
+
+        self.assertRaises(podman_error.APIError, self.pw.remove_image)
+        self.assertTrue(self.pw.changed)
+        self.pw.module.fail_json.assert_called_once_with(
+            failed=True,
+            msg=(f"Internal error: {str(podman_except)}")
+        )
+
+
+class TestVolume(base.BaseTestCase):
+    def setUp(self):
+        super(TestVolume, self).setUp()
+        self.fake_data = copy.deepcopy(FAKE_DATA)
+        self.volumes = [
+            {'Driver': 'local',
+             'Labels': {},
+             'Mountpoint': '/var/lib/docker/volumes/nova_compute/_data',
+             'Name': 'nova_compute'},
+            {'Driver': 'local',
+             'Labels': {},
+             'Mountpoint': '/var/lib/docker/volumes/mariadb/_data',
+             'Name': 'mariadb'}]
+
+    def test_parse_volumes_mounts(self):
+        in_volumes = [
+            '/etc/kolla/mariadb/:/var/lib/kolla/config_files/:shared',
+            '/etc/localtime:/etc/localtime:ro',
+            '',
+        ]
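+        # The empty entry is expected to be filtered out, and the ':ro'
+        # suffix should become a read-only bind mount with the default
+        # 'rprivate' propagation.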
+        out_mounts = []
+        out_volumes = {}
+        expected_mounts = [
+            {'source': '/etc/kolla/mariadb/',
+             'target': '/var/lib/kolla/config_files/',
+             'type': 'bind',
+             'propagation': 'shared'},
+            {'source': '/etc/localtime',
+             'target': '/etc/localtime',
+             'type': 'bind',
+             'propagation': 'rprivate',
+             'read_only': True}
+        ]
+        self.pw = get_PodmanWorker({})
+
+        self.pw.parse_volumes(in_volumes, out_mounts, out_volumes)
+        self.assertFalse(self.pw.changed)
+        self.assertEqual(expected_mounts, out_mounts)
+        self.assertEqual({}, out_volumes)
+        self.pw.module.fail_json.assert_not_called()
+
+    def test_parse_volumes_filtered_volumes(self):
+        in_volumes = [
+            '',
+            'mariadb:/var/lib/mysql',
+            'kolla_logs:/var/log/kolla/'
+        ]
+        out_mounts = []
+        out_volumes = {}
+        expected_volumes = {
+            'mariadb': {'bind': '/var/lib/mysql', 'mode': 'rw'},
+            'kolla_logs': {'bind': '/var/log/kolla/', 'mode': 'rw'}}
+        self.pw = get_PodmanWorker({})
+
+        self.pw.parse_volumes(in_volumes, out_mounts, out_volumes)
+        self.assertFalse(self.pw.changed)
+        self.assertEqual([], out_mounts)
+        self.assertEqual(expected_volumes, out_volumes)
+        self.pw.module.fail_json.assert_not_called()
+
+    def test_create_volume(self):
+        self.pw = get_PodmanWorker({'name': 'rabbitmq',
+                                    'action': 'create_volume'})
+        self.pw.pc.volumes.get.return_value = construct_volume({})
+
+        self.pw.create_volume()
+        self.pw.pc.volumes.get.assert_called_once_with('rabbitmq')
+        self.assertTrue(self.pw.changed)
+        self.pw.pc.volumes.create.assert_called_once_with(
+            name='rabbitmq',
+            driver='local')
+
+    def test_create_volume_exists(self):
+        self.pw = get_PodmanWorker({'name': 'nova_compute',
+                                    'action': 'create_volume'})
+        self.pw.pc.volumes.get.return_value = construct_volume(
+            self.volumes[0])
+
+        self.pw.create_volume()
+        self.pw.pc.volumes.get.assert_called_once_with('nova_compute')
+        self.assertFalse(self.pw.changed)
+
+    def test_remove_volume(self):
+        self.pw = get_PodmanWorker({'name': 'nova_compute',
+                                    'action': 'remove_volume'})
+        self.pw.pc.volumes.get.return_value = construct_volume(
+            self.volumes[0])
+
+        self.pw.remove_volume()
+        self.assertTrue(self.pw.changed)
+        self.pw.pc.volumes.remove.assert_called_once_with('nova_compute')
+
+    def test_remove_volume_not_exists(self):
+        self.pw = get_PodmanWorker({'name': 'rabbitmq',
+                                    'action': 'remove_volume'})
+        self.pw.pc.volumes.get.return_value = construct_volume({})
+
+        self.pw.remove_volume()
+        self.assertFalse(self.pw.changed)
+
+    def test_remove_volume_exception(self):
+        resp = mock.MagicMock()
+        resp.status_code = 409
+        podman_except = podman_error.APIError('test error', resp)
+        self.pw = get_PodmanWorker({'name': 'nova_compute',
+                                    'action': 'remove_volume'})
+        self.pw.pc.volumes.get.return_value = construct_volume(self.volumes[0])
+        self.pw.pc.volumes.remove.side_effect = podman_except
+
+        self.assertRaises(podman_error.APIError, self.pw.remove_volume)
+        self.assertTrue(self.pw.changed)
+        self.pw.module.fail_json.assert_called_once_with(
+            failed=True,
+            msg="Volume named 'nova_compute' is currently in-use"
+        )
+
+    def test_remove_volume_error(self):
+        resp = mock.MagicMock()
+        resp.status_code = 500
+        podman_except = podman_error.APIError(
+            'test error', resp, 'server error')
+        self.pw = get_PodmanWorker({'name': 'nova_compute',
+                                    'action': 'remove_volume'})
+        self.pw.pc.volumes.get.return_value = construct_volume(self.volumes[0])
+        self.pw.pc.volumes.remove.side_effect = podman_except
+
+        self.assertRaises(podman_error.APIError, self.pw.remove_volume)
+        self.assertTrue(self.pw.changed)
+        self.pw.module.fail_json.assert_called_once_with(
+            failed=True,
+            msg="Internal error: server error"
+        )
+
+
+class TestAttrComp(base.BaseTestCase):
+
+    def setUp(self):
+        super(TestAttrComp, self).setUp()
+        self.fake_data = copy.deepcopy(FAKE_DATA)
+
+    def test_compare_cap_add_neg(self):
+        container_info = {'HostConfig': dict(CapAdd=['data'])}
+        self.pw = get_PodmanWorker({'cap_add': ['data']})
+        self.assertFalse(self.pw.compare_cap_add(container_info))
+
+    def test_compare_cap_add_pos(self):
+        container_info = {'HostConfig': dict(CapAdd=['data1'])}
+        self.pw = get_PodmanWorker({'cap_add': ['data2']})
+        self.assertTrue(self.pw.compare_cap_add(container_info))
+
+    def test_compare_ipc_mode_neg(self):
+        container_info = {'HostConfig': dict(IpcMode='data')}
+        self.pw = get_PodmanWorker({'ipc_mode': 'data'})
+        self.assertFalse(self.pw.compare_ipc_mode(container_info))
+
+    def test_compare_ipc_mode_pos(self):
+        container_info = {'HostConfig': dict(IpcMode='data1')}
+        self.pw = get_PodmanWorker({'ipc_mode': 'data2'})
+        self.assertTrue(self.pw.compare_ipc_mode(container_info))
+
+    def test_compare_security_opt_neg(self):
+        container_info = {'HostConfig': dict(SecurityOpt=['data'])}
+        self.pw = get_PodmanWorker({'security_opt': ['data']})
+        self.assertFalse(self.pw.compare_security_opt(container_info))
+
+    def test_compare_security_opt_pos(self):
+        container_info = {'HostConfig': dict(SecurityOpt=['data1'])}
+        self.pw = get_PodmanWorker({'security_opt': ['data2']})
+        self.assertTrue(self.pw.compare_security_opt(container_info))
+
+    def test_compare_pid_mode_neg(self):
+        container_info = {'HostConfig': dict(PidMode='host')}
+        self.pw = get_PodmanWorker({'pid_mode': 'host'})
+        self.assertFalse(self.pw.compare_pid_mode(container_info))
+
+    def test_compare_pid_mode_pos(self):
+        container_info = {'HostConfig': dict(PidMode='host1')}
+        self.pw = get_PodmanWorker({'pid_mode': 'host2'})
+        self.assertTrue(self.pw.compare_pid_mode(container_info))
+
+    def test_compare_cgroupns_mode_neg(self):
+        container_info = {'HostConfig': dict(CgroupMode='host')}
+        self.pw = get_PodmanWorker({'cgroupns_mode': 'host'})
+        self.assertFalse(self.pw.compare_cgroupns_mode(container_info))
+
+    def test_compare_cgroupns_mode_neg_backward_compat(self):
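+        # An empty CgroupMode is what older engines report; it is
+        # effectively treated as 'host', so requesting 'host' is not a
+        # change.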
+        container_info = {'HostConfig': dict(CgroupMode='')}
+        self.pw = get_PodmanWorker({'cgroupns_mode': 'host'})
+        self.assertFalse(self.pw.compare_cgroupns_mode(container_info))
+
+    def test_compare_cgroupns_mode_ignore(self):
+        container_info = {'HostConfig': dict(CgroupMode='private')}
+        self.pw = get_PodmanWorker({})
+        self.assertFalse(self.pw.compare_cgroupns_mode(container_info))
+
+    def test_compare_cgroupns_mode_pos(self):
+        container_info = {'HostConfig': dict(CgroupMode='private')}
+        self.pw = get_PodmanWorker({'cgroupns_mode': 'host', 'debug': True})
+        self.assertTrue(self.pw.compare_cgroupns_mode(container_info))
+
+    def test_compare_cgroupns_mode_pos_backward_compat(self):
+        container_info = {'HostConfig': dict(CgroupMode='')}
+        self.pw = get_PodmanWorker({'cgroupns_mode': 'private', 'debug': True})
+        self.assertTrue(self.pw.compare_cgroupns_mode(container_info))
+
+    def test_compare_cgroupns_mode_unsupported(self):
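+        # Without a CgroupMode entry the engine does not expose the
+        # cgroup namespace mode at all, so the comparison is skipped.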
+        container_info = {'HostConfig': dict()}
+        self.pw = get_PodmanWorker({'cgroupns_mode': 'host'})
+        self.assertFalse(self.pw.compare_cgroupns_mode(container_info))
+
+    def test_compare_privileged_neg(self):
+        container_info = {'HostConfig': dict(Privileged=True)}
+        self.pw = get_PodmanWorker({'privileged': True})
+        self.assertFalse(self.pw.compare_privileged(container_info))
+
+    def test_compare_privileged_pos(self):
+        container_info = {'HostConfig': dict(Privileged=True)}
+        self.pw = get_PodmanWorker({'privileged': False})
+        self.assertTrue(self.pw.compare_privileged(container_info))
+
+    def test_compare_labels_neg(self):
+        container_info = {'Config': dict(Labels={'kolla_version': '2.0.1'})}
+        self.pw = get_PodmanWorker({'labels': {'kolla_version': '2.0.1'}})
+        self.pw.check_image = mock.MagicMock(return_value=dict(
+            Labels={'kolla_version': '2.0.1'}))
+        self.assertFalse(self.pw.compare_labels(container_info))
+
+    def test_compare_labels_pos(self):
+        container_info = {'Config': dict(Labels={'kolla_version': '1.0.1'})}
+        self.pw = get_PodmanWorker({'labels': {'kolla_version': '2.0.1'}})
+        self.pw.check_image = mock.MagicMock(return_value=dict(
+            Labels={'kolla_version': '1.0.1'}))
+        self.assertTrue(self.pw.compare_labels(container_info))
+
+    def test_compare_tmpfs_neg(self):
+        container_info = {'HostConfig': dict(Tmpfs=['foo'])}
+        self.pw = get_PodmanWorker({'tmpfs': ['foo']})
+
+        self.assertFalse(self.pw.compare_tmpfs(container_info))
+
+    def test_compare_tmpfs_neg_empty_string(self):
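+        # Empty strings are filtered out, so [''] is equivalent to not
+        # requesting any tmpfs mounts.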
+        container_info = {'HostConfig': dict()}
+        self.pw = get_PodmanWorker({'tmpfs': ['']})
+
+        self.assertFalse(self.pw.compare_tmpfs(container_info))
+
+    def test_compare_tmpfs_pos_different(self):
+        container_info = {'HostConfig': dict(Tmpfs=['foo'])}
+        self.pw = get_PodmanWorker({'tmpfs': ['bar']})
+
+        self.assertTrue(self.pw.compare_tmpfs(container_info))
+
+    def test_compare_tmpfs_pos_empty_new(self):
+        container_info = {'HostConfig': dict(Tmpfs=['foo'])}
+        self.pw = get_PodmanWorker({})
+
+        self.assertTrue(self.pw.compare_tmpfs(container_info))
+
+    def test_compare_tmpfs_pos_empty_current(self):
+        container_info = {'HostConfig': dict()}
+        self.pw = get_PodmanWorker({'tmpfs': ['bar']})
+
+        self.assertTrue(self.pw.compare_tmpfs(container_info))
+
+    def test_compare_volumes_from_neg(self):
+        container_info = {'HostConfig': dict(VolumesFrom=['777f7dc92da7'])}
+        self.pw = get_PodmanWorker({'volumes_from': ['777f7dc92da7']})
+
+        self.assertFalse(self.pw.compare_volumes_from(container_info))
+
+    def test_compare_volumes_from_pos(self):
+        container_info = {'HostConfig': dict(VolumesFrom=['777f7dc92da7'])}
+        self.pw = get_PodmanWorker({'volumes_from': ['ba8c0c54f0f2']})
+
+        self.assertTrue(self.pw.compare_volumes_from(container_info))
+
+    def test_compare_volumes_neg(self):
+        container_info = {
+            'Config': dict(Volumes=['/var/log/kolla/']),
+            'HostConfig': dict(Binds=['kolla_logs:/var/log/kolla/:rw'])}
+        self.pw = get_PodmanWorker(
+            {'volumes': ['kolla_logs:/var/log/kolla/:rw']})
+
+        self.assertFalse(self.pw.compare_volumes(container_info))
+
+    def test_compare_volumes_pos(self):
+        container_info = {
+            'Config': dict(Volumes=['/var/log/kolla/']),
+            'HostConfig': dict(Binds=['kolla_logs:/var/log/kolla/:rw'])}
+        self.pw = get_PodmanWorker(
+            {'volumes': ['/dev/:/dev/:rw']})
+
+        self.assertTrue(self.pw.compare_volumes(container_info))
+
+    def test_compare_environment_neg(self):
+        container_info = {'Config': dict(
+            Env=['KOLLA_CONFIG_STRATEGY=COPY_ALWAYS',
+                 'KOLLA_BASE_DISTRO=ubuntu',
+                 'KOLLA_INSTALL_TYPE=binary']
+        )}
+        self.pw = get_PodmanWorker({
+            'environment': dict(KOLLA_CONFIG_STRATEGY='COPY_ALWAYS',
+                                KOLLA_BASE_DISTRO='ubuntu',
+                                KOLLA_INSTALL_TYPE='binary')})
+
+        self.assertFalse(self.pw.compare_environment(container_info))
+
+    def test_compare_environment_pos(self):
+        container_info = {'Config': dict(
+            Env=['KOLLA_CONFIG_STRATEGY=COPY_ALWAYS',
+                 'KOLLA_BASE_DISTRO=ubuntu',
+                 'KOLLA_INSTALL_TYPE=binary']
+        )}
+        self.pw = get_PodmanWorker({
+            'environment': dict(KOLLA_CONFIG_STRATEGY='COPY_ALWAYS',
+                                KOLLA_BASE_DISTRO='centos',
+                                KOLLA_INSTALL_TYPE='binary')})
+
+        self.assertTrue(self.pw.compare_environment(container_info))
+
+    def test_compare_container_state_pos(self):
+        container_info = {'State': dict(Status='running')}
+        self.pw = get_PodmanWorker({'state': 'exited'})
+        self.assertTrue(self.pw.compare_container_state(container_info))
+
+    def test_compare_container_state_neg(self):
+        container_info = {'State': dict(Status='running')}
+        self.pw = get_PodmanWorker({'state': 'running'})
+        self.assertFalse(self.pw.compare_container_state(container_info))
+
+    def test_compare_dimensions_pos(self):
+        self.fake_data['params']['dimensions'] = {
+            'blkio_weight': 10, 'mem_limit': 30}
+        container_info = dict()
+        container_info['HostConfig'] = {
+            'CpuPeriod': 0, 'KernelMemory': 0, 'Memory': 0, 'CpuQuota': 0,
+            'CpusetCpus': '', 'CpuShares': 0, 'BlkioWeight': 0,
+            'CpusetMems': '', 'MemorySwap': 0, 'MemoryReservation': 0,
+            'Ulimits': []}
+        self.pw = get_PodmanWorker(self.fake_data['params'])
+        self.assertTrue(self.pw.compare_dimensions(container_info))
+
+    def test_compare_dimensions_neg(self):
+        self.fake_data['params']['dimensions'] = {
+            'blkio_weight': 10}
+        container_info = dict()
+        container_info['HostConfig'] = {
+            'CpuPeriod': 0, 'KernelMemory': 0, 'Memory': 0, 'CpuQuota': 0,
+            'CpusetCpus': '', 'CpuShares': 0, 'BlkioWeight': 10,
+            'CpusetMems': '', 'MemorySwap': 0, 'MemoryReservation': 0,
+            'Ulimits': []}
+        self.pw = get_PodmanWorker(self.fake_data['params'])
+        self.assertFalse(self.pw.compare_dimensions(container_info))
+
+    def test_compare_wrong_dimensions(self):
+        self.fake_data['params']['dimensions'] = {
+            'blki_weight': 0}
+        container_info = dict()
+        container_info['HostConfig'] = {
+            'CpuPeriod': 0, 'KernelMemory': 0, 'Memory': 0, 'CpuQuota': 0,
+            'CpusetCpus': '', 'CpuShares': 0, 'BlkioWeight': 0,
+            'CpusetMems': '', 'MemorySwap': 0, 'MemoryReservation': 0,
+            'Ulimits': []}
+        self.pw = get_PodmanWorker(self.fake_data['params'])
+        self.pw.compare_dimensions(container_info)
+        self.pw.module.exit_json.assert_called_once_with(
+            failed=True, msg=repr("Unsupported dimensions"),
+            unsupported_dimensions=set(['blki_weight']))
+
+    def test_compare_empty_dimensions(self):
+        self.fake_data['params']['dimensions'] = dict()
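+        # Although no dimensions are requested, CpusetCpus is set to '1'
+        # on the container, so compare_dimensions should report a change.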
+        container_info = dict()
+        container_info['HostConfig'] = {
+            'CpuPeriod': 0, 'KernelMemory': 0, 'Memory': 0, 'CpuQuota': 0,
+            'CpusetCpus': '1', 'CpuShares': 0, 'BlkioWeight': 0,
+            'CpusetMems': '', 'MemorySwap': 0, 'MemoryReservation': 0,
+            'Ulimits': []}
+        self.pw = get_PodmanWorker(self.fake_data['params'])
+        self.assertTrue(self.pw.compare_dimensions(container_info))
+
+    def test_compare_dimensions_removed_and_changed(self):
+        self.fake_data['params']['dimensions'] = {
+            'mem_reservation': 10}
+        container_info = dict()
+        # The container already has both mem_limit and mem_reservation
+        # set, while the requested dimensions now keep only
+        # 'mem_reservation', so compare_dimensions should return True to
+        # signal that the podman dimensions have changed.
+        container_info['HostConfig'] = {
+            'CpuPeriod': 0, 'KernelMemory': 0, 'Memory': 10, 'CpuQuota': 0,
+            'CpusetCpus': '', 'CpuShares': 0, 'BlkioWeight': 0,
+            'CpusetMems': '', 'MemorySwap': 0, 'MemoryReservation': 10,
+            'Ulimits': []}
+        self.pw = get_PodmanWorker(self.fake_data['params'])
+        self.assertTrue(self.pw.compare_dimensions(container_info))
+
+    def test_compare_dimensions_explicit_default(self):
+        self.fake_data['params']['dimensions'] = {
+            'mem_reservation': 0}
+        container_info = dict()
+        # Here 'mem_reservation' is explicitly set to its default value
+        # (0), which matches what the container already reports, so
+        # compare_dimensions should return False as the podman dimensions
+        # are effectively unchanged.
+        container_info['HostConfig'] = {
+            'CpuPeriod': 0, 'KernelMemory': 0, 'Memory': 0, 'CpuQuota': 0,
+            'CpusetCpus': '', 'CpuShares': 0, 'BlkioWeight': 0,
+            'CpusetMems': '', 'MemorySwap': 0, 'MemoryReservation': 0,
+            'Ulimits': []}
+        self.pw = get_PodmanWorker(self.fake_data['params'])
+        self.assertFalse(self.pw.compare_dimensions(container_info))
+
+    def test_compare_ulimits_pos(self):
+        self.fake_data['params']['dimensions'] = {
+            'ulimits': {'nofile': {'soft': 131072, 'hard': 131072}}}
+        container_info = dict()
+        container_info['HostConfig'] = {
+            'CpuPeriod': 0, 'KernelMemory': 0, 'Memory': 0, 'CpuQuota': 0,
+            'CpusetCpus': '', 'CpuShares': 0, 'BlkioWeight': 0,
+            'CpusetMems': '', 'MemorySwap': 0, 'MemoryReservation': 0,
+            'Ulimits': []}
+        self.pw = get_PodmanWorker(self.fake_data['params'])
+        self.assertTrue(self.pw.compare_dimensions(container_info))
+
+    def test_compare_ulimits_neg(self):
+        self.fake_data['params']['dimensions'] = {
+            'ulimits': {'nofile': {'soft': 131072, 'hard': 131072}}}
+        ulimits_nofile = {'Name': 'nofile',
+                          'Soft': 131072, 'Hard': 131072}
+        container_info = dict()
+        container_info['HostConfig'] = {
+            'CpuPeriod': 0, 'KernelMemory': 0, 'Memory': 0, 'CpuQuota': 0,
+            'CpusetCpus': '', 'CpuShares': 0, 'BlkioWeight': 0,
+            'CpusetMems': '', 'MemorySwap': 0, 'MemoryReservation': 0,
+            'Ulimits': [ulimits_nofile]}
+        self.pw = get_PodmanWorker(self.fake_data['params'])
+        self.assertFalse(self.pw.compare_dimensions(container_info))
+
+    def test_compare_empty_new_healthcheck(self):
+        container_info = dict()
+        container_info['Config'] = {
+            'Healthcheck': {
+                'Test': [
+                    "CMD-SHELL",
+                    "/bin/check.sh"],
+                "Interval": 30000000000,
+                "Timeout": 30000000000,
+                "StartPeriod": 5000000000,
+                "Retries": 3}}
+        self.pw = get_PodmanWorker(self.fake_data['params'])
+        self.assertTrue(self.pw.compare_healthcheck(container_info))
+
+    def test_compare_empty_current_healthcheck(self):
+        self.fake_data['params']['healthcheck'] = {
+            'test': ['CMD-SHELL', '/bin/check.sh'],
+            'interval': 30,
+            'timeout': 30,
+            'start_period': 5,
+            'retries': 3}
+        container_info = dict()
+        container_info['Config'] = {}
+        self.pw = get_PodmanWorker(self.fake_data['params'])
+        self.assertTrue(self.pw.compare_healthcheck(container_info))
+
+    def test_compare_healthcheck_no_test(self):
+        self.fake_data['params']['healthcheck'] = {
+            'interval': 30,
+            'timeout': 30,
+            'start_period': 5,
+            'retries': 3}
+        container_info = dict()
+        container_info['Config'] = {
+            'Healthcheck': {
+                'Test': [
+                    "CMD-SHELL",
+                    "/bin/check.sh"],
+                "Interval": 30000000000,
+                "Timeout": 30000000000,
+                "StartPeriod": 5000000000,
+                "Retries": 3}}
+        self.pw = get_PodmanWorker(self.fake_data['params'])
+        self.pw.compare_healthcheck(container_info)
+        self.pw.module.exit_json.assert_called_once_with(
+            failed=True, msg=repr("Missing healthcheck option"),
+            missing_healthcheck=set(['test']))
+
+    def test_compare_healthcheck_pos(self):
+        self.fake_data['params']['healthcheck'] = \
+            {'test': ['CMD', '/bin/check']}
+        container_info = dict()
+        container_info['Config'] = {
+            'Healthcheck': {
+                'Test': [
+                    "CMD-SHELL",
+                    "/bin/check.sh"],
+                "Interval": 30000000000,
+                "Timeout": 30000000000,
+                "StartPeriod": 5000000000,
+                "Retries": 3}}
+        self.pw = get_PodmanWorker(self.fake_data['params'])
+        self.assertTrue(self.pw.compare_healthcheck(container_info))
+
+    def test_compare_healthcheck_neg(self):
+        self.fake_data['params']['healthcheck'] = \
+            {'test': ['CMD-SHELL', '/bin/check.sh'],
+             'interval': 30,
+             'timeout': 30,
+             'start_period': 5,
+             'retries': 3}
+        container_info = dict()
+        container_info['Config'] = {
+            "Healthcheck": {
+                "Test": [
+                    "CMD-SHELL",
+                    "/bin/check.sh"],
+                "Interval": 30000000000,
+                "Timeout": 30000000000,
+                "StartPeriod": 5000000000,
+                "Retries": 3}}
+        self.pw = get_PodmanWorker(self.fake_data['params'])
+        self.assertFalse(self.pw.compare_healthcheck(container_info))
+
+    def test_compare_healthcheck_time_zero(self):
+        self.fake_data['params']['healthcheck'] = \
+            {'test': ['CMD-SHELL', '/bin/check.sh'],
+             'interval': 0,
+             'timeout': 30,
+             'start_period': 5,
+             'retries': 3}
+        container_info = dict()
+        container_info['Config'] = {
+            "Healthcheck": {
+                "Test": [
+                    "CMD-SHELL",
+                    "/bin/check.sh"],
+                "Interval": 30000000000,
+                "Timeout": 30000000000,
+                "StartPeriod": 5000000000,
+                "Retries": 3}}
+        self.pw = get_PodmanWorker(self.fake_data['params'])
+        self.assertTrue(self.pw.compare_healthcheck(container_info))
+
+    def test_compare_healthcheck_time_wrong_type(self):
+        self.fake_data['params']['healthcheck'] = \
+            {'test': ['CMD-SHELL', '/bin/check.sh'],
+             'timeout': 30,
+             'start_period': 5,
+             'retries': 3}
+        self.fake_data['params']['healthcheck']['interval'] = \
+            {"broken": {"interval": "True"}}
+        container_info = dict()
+        container_info['Config'] = {
+            "Healthcheck": {
+                "Test": [
+                    "CMD-SHELL",
+                    "/bin/check.sh"],
+                "Interval": 30000000000,
+                "Timeout": 30000000000,
+                "StartPeriod": 5000000000,
+                "Retries": 3}}
+        self.pw = get_PodmanWorker(self.fake_data['params'])
+        self.assertRaises(TypeError,
+                          lambda: self.pw.compare_healthcheck(container_info))
+
+    def test_compare_healthcheck_time_wrong_value(self):
+        self.fake_data['params']['healthcheck'] = \
+            {'test': ['CMD-SHELL', '/bin/check.sh'],
+             'timeout': 30,
+             'start_period': 5,
+             'retries': 3}
+        self.fake_data['params']['healthcheck']['interval'] = "dog"
+        container_info = dict()
+        container_info['Config'] = {
+            "Healthcheck": {
+                "Test": [
+                    "CMD-SHELL",
+                    "/bin/check.sh"],
+                "Interval": 30000000000,
+                "Timeout": 30000000000,
+                "StartPeriod": 5000000000,
+                "Retries": 3}}
+        self.pw = get_PodmanWorker(self.fake_data['params'])
+        self.assertRaises(ValueError,
+                          lambda: self.pw.compare_healthcheck(container_info))
+
+    def test_compare_healthcheck_opt_missing(self):
+        self.fake_data['params']['healthcheck'] = \
+            {'test': ['CMD-SHELL', '/bin/check.sh'],
+             'interval': 30,
+             'timeout': 30,
+             'retries': 3}
+        container_info = dict()
+        container_info['Config'] = {
+            "Healthcheck": {
+                "Test": [
+                    "CMD-SHELL",
+                    "/bin/check.sh"],
+                "Interval": 30000000000,
+                "Timeout": 30000000000,
+                "StartPeriod": 5000000000,
+                "Retries": 3}}
+        self.pw = get_PodmanWorker(self.fake_data['params'])
+        self.pw.compare_healthcheck(container_info)
+        self.pw.module.exit_json.assert_called_once_with(
+            failed=True, msg=repr("Missing healthcheck option"),
+            missing_healthcheck=set(['start_period']))
+
+    def test_compare_healthcheck_opt_extra(self):
+        self.fake_data['params']['healthcheck'] = \
+            {'test': ['CMD-SHELL', '/bin/check.sh'],
+             'interval': 30,
+             'start_period': 5,
+             'extra_option': 1,
+             'timeout': 30,
+             'retries': 3}
+        container_info = dict()
+        container_info['Config'] = {
+            "Healthcheck": {
+                "Test": [
+                    "CMD-SHELL",
+                    "/bin/check.sh"],
+                "Interval": 30000000000,
+                "Timeout": 30000000000,
+                "StartPeriod": 5000000000,
+                "Retries": 3}}
+        self.pw = get_PodmanWorker(self.fake_data['params'])
+        self.pw.compare_healthcheck(container_info)
+        self.pw.module.exit_json.assert_called_once_with(
+            failed=True, msg=repr("Unsupported healthcheck options"),
+            unsupported_healthcheck=set(['extra_option']))
+
+    def test_compare_healthcheck_value_false(self):
+        self.fake_data['params']['healthcheck'] = \
+            {'test': ['CMD-SHELL', '/bin/check.sh'],
+             'interval': 30,
+             'start_period': 5,
+             'extra_option': 1,
+             'timeout': 30,
+             'retries': False}
+        container_info = dict()
+        container_info['Config'] = {
+            "Healthcheck": {
+                "Test": [
+                    "CMD-SHELL",
+                    "/bin/check.sh"],
+                "Interval": 30000000000,
+                "Timeout": 30000000000,
+                "StartPeriod": 5000000000,
+                "Retries": 3}}
+        self.pw = get_PodmanWorker(self.fake_data['params'])
+        self.assertTrue(self.pw.compare_healthcheck(container_info))
+
+    def test_parse_healthcheck_empty(self):
+        self.pw = get_PodmanWorker(self.fake_data['params'])
+        self.assertIsNone(self.pw.parse_healthcheck(
+                          self.fake_data.get('params', {}).get('healthcheck')))
+
+    def test_parse_healthcheck_test_none(self):
+        self.fake_data['params']['healthcheck'] = \
+            {'test': 'NONE'}
+        self.pw = get_PodmanWorker(self.fake_data['params'])
+        self.assertIsNone(self.pw.parse_healthcheck(
+                          self.fake_data['params']['healthcheck']))
+
+    def test_parse_healthcheck_test_none_brackets(self):
+        self.fake_data['params']['healthcheck'] = \
+            {'test': ['NONE']}
+        self.pw = get_PodmanWorker(self.fake_data['params'])
+        self.assertIsNone(self.pw.parse_healthcheck(
+                          self.fake_data['params']['healthcheck']))
diff --git a/tests/setup_gate.sh b/tests/setup_gate.sh
index 7be0a8daaaeadf4d0eb926e691071252b1169b36..2446d4f8eab460de1569e2be34cfae141bcbfdc7 100755
--- a/tests/setup_gate.sh
+++ b/tests/setup_gate.sh
@@ -103,11 +103,15 @@ function prepare_images {
     fi
 
     sudo tee -a /etc/kolla/kolla-build.conf <<EOF
+[DEFAULT]
+engine = ${CONTAINER_ENGINE}
+
 [profiles]
 gate = ${GATE_IMAGES}
 EOF
 
-    mkdir -p /tmp/logs/build
+    sudo mkdir -p /tmp/logs/build
+    sudo mkdir -p /opt/kolla_registry
 
     sudo $CONTAINER_ENGINE run -d --net=host -e REGISTRY_HTTP_ADDR=0.0.0.0:4000 --restart=always -v /opt/kolla_registry/:/var/lib/registry --name registry registry:2
 
diff --git a/tests/test-kolla-toolbox.yml b/tests/test-kolla-toolbox.yml
index 50a60afdeb761169601cf36b05d067b3b816a70e..566d53220e8f9b65d1cb05b7a25ef36846435db6 100644
--- a/tests/test-kolla-toolbox.yml
+++ b/tests/test-kolla-toolbox.yml
@@ -2,80 +2,11 @@
 - name: Test the kolla_toolbox module
   hosts: localhost
   gather_facts: false
+  vars:
+    container_engines:
+      - "docker"
+      - "podman"
   tasks:
-    - name: Test successful & unchanged
-      kolla_toolbox:
-        module_name: debug
-        module_args:
-          msg: hi
-      register: result
-
-    - name: Assert result is successful
-      assert:
-        that: result is successful
-
-    - name: Assert result is not changed
-      assert:
-        that: result is not changed
-
-    - name: Test successful & changed
-      kolla_toolbox:
-        module_name: command
-        module_args:
-          echo hi
-      register: result
-
-    - name: Assert result is successful
-      assert:
-        that: result is successful
-
-    - name: Assert result is changed
-      assert:
-        that: result is changed
-
-    - name: Test unsuccessful
-      kolla_toolbox:
-        module_name: command
-        module_args:
-          foo
-      register: result
-      ignore_errors: true
-
-    - name: Assert result is failed
-      assert:
-        that: result is failed
-
-    - name: Test invalid module parameters
-      kolla_toolbox:
-        module_name: debug
-        module_args:
-          foo: bar
-      register: result
-      ignore_errors: true
-
-    - name: Assert result is failed
-      assert:
-        that: result is failed
-
-    - name: Setup for Test successful & changed (JSON format)
-      kolla_toolbox:
-        module_name: file
-        module_args:
-          path: /tmp/foo
-          state: absent
-
-    - name: Test successful & changed (JSON format)
-      kolla_toolbox:
-        module_name: file
-        module_args:
-          path: /tmp/foo
-          state: directory
-      register: result
-
-    - name: Assert result is successful
-      assert:
-        that: result is successful
-
-    - name: Assert result is changed
-      assert:
-        that: result is changed
+    - name: Test kolla-toolbox for each container engine
+      include_tasks: kolla-toolbox-testsuite.yml
+      with_items: "{{ container_engines }}"
diff --git a/tox.ini b/tox.ini
index ec9a22faedc1aa9c607ccf059cee9d7403f0b183..f914829a7d0684e216cd0d0611ef190cf5c5b7e1 100644
--- a/tox.ini
+++ b/tox.ini
@@ -12,6 +12,7 @@ allowlist_externals = bash
 deps = -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
        -r{toxinidir}/requirements.txt
        -r{toxinidir}/test-requirements.txt
+       podman>=4.3.0,<5
 passenv = http_proxy,HTTP_PROXY,https_proxy,HTTPS_PROXY,no_proxy,NO_PROXY, \
           OS_STDOUT_CAPTURE,OS_STDERR_CAPTURE,OS_LOG_CAPTURE,OS_TEST_TIMEOUT, \
           PYTHON,OS_TEST_PATH,LISTOPT,IDOPTION
@@ -93,6 +94,7 @@ setenv =
   ANSIBLE_LIBRARY = {toxinidir}/ansible/library
   ANSIBLE_ACTION_PLUGINS = {toxinidir}/ansible/action_plugins
   ANSIBLE_FILTER_PLUGINS = {toxinidir}/ansible/filter_plugins
+
 deps =
   -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
   -r{toxinidir}/requirements.txt
diff --git a/zuul.d/jobs.yaml b/zuul.d/jobs.yaml
index c5063f0f7877ba17cc8c3f003d97fdcd18cd1a8b..b751d80b15dd8ae9e97a0abb0acf4e839978e790 100644
--- a/zuul.d/jobs.yaml
+++ b/zuul.d/jobs.yaml
@@ -17,6 +17,16 @@
     required-projects:
       - openstack/kolla
 
+- job:
+    name: kolla-ansible-debian-aarch64-podman
+    parent: kolla-ansible-debian
+    nodeset: kolla-ansible-debian-bookworm-aarch64
+    timeout: 10800
+    vars:
+      container_engine: podman
+    required-projects:
+      - openstack/kolla
+
 - job:
     name: kolla-ansible-debian
     parent: kolla-ansible-base
@@ -25,6 +35,15 @@
       base_distro: debian
       tls_enabled: true
 
+- job:
+    name: kolla-ansible-debian-podman
+    parent: kolla-ansible-base
+    nodeset: kolla-ansible-debian-bookworm
+    vars:
+      base_distro: debian
+      tls_enabled: true
+      container_engine: podman
+
 - job:
     name: kolla-ansible-openeuler
     parent: kolla-ansible-base
@@ -42,6 +61,15 @@
       base_distro: rocky
       tls_enabled: true
 
+- job:
+    name: kolla-ansible-rocky9-podman
+    parent: kolla-ansible-base
+    nodeset: kolla-ansible-rocky9
+    vars:
+      base_distro: rocky
+      tls_enabled: true
+      container_engine: podman
+
 - job:
     name: kolla-ansible-ubuntu
     parent: kolla-ansible-base
@@ -50,6 +78,15 @@
       base_distro: ubuntu
       tls_enabled: true
 
+- job:
+    name: kolla-ansible-ubuntu-podman
+    parent: kolla-ansible-base
+    nodeset: kolla-ansible-jammy
+    vars:
+      base_distro: ubuntu
+      tls_enabled: true
+      container_engine: podman
+
 - job:
     name: kolla-ansible-rocky9-kvm
     parent: kolla-ansible-kvm-base
diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml
index 11dc1a1c6d864dd08709fef9b0115b3689a0361c..c08e09abc789d2966e2b2a756b5fab0feea43843 100644
--- a/zuul.d/project.yaml
+++ b/zuul.d/project.yaml
@@ -13,9 +13,12 @@
       jobs:
         - kolla-ansible-centos9s
         - kolla-ansible-debian
+        - kolla-ansible-debian-podman
         - kolla-ansible-openeuler
         - kolla-ansible-rocky9
+        - kolla-ansible-rocky9-podman
         - kolla-ansible-ubuntu
+        - kolla-ansible-ubuntu-podman
         - kolla-ansible-rocky9-kvm
         - kolla-ansible-ubuntu-kvm
         - kolla-ansible-rocky9-multinode-ipv6
@@ -62,15 +65,19 @@
     check-arm64:
       jobs:
         - kolla-ansible-debian-aarch64
+        - kolla-ansible-debian-aarch64-podman
         - kolla-ansible-debian-upgrade-aarch64
     gate:
       jobs:
         - kolla-ansible-debian
         - kolla-ansible-debian-upgrade
+        - kolla-ansible-debian-podman
         - kolla-ansible-rocky9
         - kolla-ansible-rocky9-upgrade
+        - kolla-ansible-rocky9-podman
         - kolla-ansible-ubuntu
         - kolla-ansible-ubuntu-upgrade
+        - kolla-ansible-ubuntu-podman
     experimental:
       jobs:
         - kolla-ansible-rocky9-swift-upgrade