diff --git a/doc/requirements.txt b/doc/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..9fbca52ce8cc9c7222a6929f7311720364f31e0f
--- /dev/null
+++ b/doc/requirements.txt
@@ -0,0 +1,10 @@
+# The order of packages is significant, because pip processes them in the order
+# of appearance. Changing the order has an impact on the overall integration
+# process, which may cause wedges in the gate later.
+# Order matters to the pip dependency resolver, so sorting this file
+# changes how packages are installed.  New dependencies should be
+# added in alphabetical order; however, some dependencies may need to
+# be installed in a specific order.
+openstackdocstheme>=1.18.1 # Apache-2.0
+reno>=2.5.0 # Apache-2.0
+sphinx!=1.6.6,>=1.6.2 # BSD
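A rough sketch of how these documentation dependencies are consumed outside of
tox: install them under the same upper-constraints file referenced in
``tox.ini`` and run the same Sphinx build as the ``docs`` environment (this
assumes an already activated virtualenv):

.. code-block:: console

   pip install -c https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt \
       -r doc/requirements.txt
   sphinx-build -W -b html doc/source doc/build/html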
diff --git a/doc/source/admin/advanced-configuration.rst b/doc/source/admin/advanced-configuration.rst
index 36d3928c3b84bd620017db706f1cbaa4438e7671..fd9f9fa78e3dfb5f70aa8f8570abaa2d0f6faffd 100644
--- a/doc/source/admin/advanced-configuration.rst
+++ b/doc/source/admin/advanced-configuration.rst
@@ -26,7 +26,7 @@ For the combined option, set the two variables below, while allowing the
 other two to accept their default values. In this configuration all REST
 API requests, internal and external, will flow over the same network.
 
-.. code-block:: none
+.. code-block:: yaml
 
    kolla_internal_vip_address: "10.10.10.254"
    network_interface: "eth0"
@@ -37,7 +37,7 @@ For the separate option, set these four variables. In this configuration
 the internal and external REST API requests can flow over separate
 networks.
 
-.. code-block:: none
+.. code-block:: yaml
 
    kolla_internal_vip_address: "10.10.10.254"
    network_interface: "eth0"
@@ -57,7 +57,7 @@ in your kolla deployment use the variables:
 - kolla_internal_fqdn
 - kolla_external_fqdn
 
-.. code-block:: none
+.. code-block:: yaml
 
    kolla_internal_fqdn: inside.mykolla.example.net
    kolla_external_fqdn: mykolla.example.net
@@ -95,7 +95,7 @@ The configuration variables that control TLS networking are:
 
 The default for TLS is disabled, to enable TLS networking:
 
-.. code-block:: none
+.. code-block:: yaml
 
    kolla_enable_tls_external: "yes"
    kolla_external_fqdn_cert: "{{ node_config_directory }}/certificates/mycert.pem"
@@ -174,7 +174,7 @@ OpenStack Service Configuration in Kolla
 An operator can change the location where custom config files are read from by
 editing ``/etc/kolla/globals.yml`` and adding the following line.
 
-.. code-block:: none
+.. code-block:: yaml
 
    # The directory to merge custom config files the kolla's config files
    node_custom_config: "/etc/kolla/config"
@@ -251,7 +251,7 @@ If a development environment doesn't have a free IP address available for VIP
 configuration, the host's IP address may be used here by disabling HAProxy by
 adding:
 
-.. code-block:: none
+.. code-block:: yaml
 
    enable_haproxy: "no"
 
@@ -267,7 +267,7 @@ External Elasticsearch/Kibana environment
 It is possible to use an external Elasticsearch/Kibana environment. To do this
 first disable the deployment of the central logging.
 
-.. code-block:: none
+.. code-block:: yaml
 
    enable_central_logging: "no"
 
@@ -283,7 +283,7 @@ It is sometimes required to use a different than default port
 for service(s) in Kolla. It is possible with setting
 ``<service>_port`` in ``globals.yml`` file. For example:
 
-.. code-block:: none
+.. code-block:: yaml
 
    database_port: 3307
 
@@ -299,7 +299,7 @@ By default, Fluentd is used as a syslog server to collect Swift and HAProxy
 logs. When Fluentd is disabled or you want to use an external syslog server,
 You can set syslog parameters in ``globals.yml`` file. For example:
 
-.. code-block:: none
+.. code-block:: yaml
 
    syslog_server: "172.29.9.145"
    syslog_udp_port: "514"
@@ -309,7 +309,7 @@ You can set syslog parameters in ``globals.yml`` file. For example:
 You can also set syslog facility names for Swift and HAProxy logs.
 By default, Swift and HAProxy use ``local0`` and ``local1``, respectively.
 
-.. code-block:: none
+.. code-block:: yaml
 
    syslog_swift_facility: "local0"
    syslog_haproxy_facility: "local1"
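For the ``kolla_enable_tls_external`` settings covered in this file, a
throwaway set of certificates for development can be generated with the
bundled helper; this is a sketch for test environments only and assumes
``kolla-ansible`` is installed and ``/etc/kolla/globals.yml`` already carries
the FQDN and TLS values shown above:

.. code-block:: console

   kolla-ansible certificates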
diff --git a/doc/source/contributor/CONTRIBUTING.rst b/doc/source/contributor/CONTRIBUTING.rst
index 68e1ec425a737c9877ece0b8d0184d17a6b9534a..5b21a048a02b0e09c25a20b94ca4620c21908d17 100644
--- a/doc/source/contributor/CONTRIBUTING.rst
+++ b/doc/source/contributor/CONTRIBUTING.rst
@@ -87,7 +87,7 @@ that Kolla uses throughout that should be followed.
     content:
 
     .. path ansible/roles/common/templates/cron-logrotate-PROJECT.conf.j2
-    .. code-block:: none
+    .. code-block:: console
 
        "/var/log/kolla/PROJECT/*.log"
        {
diff --git a/doc/source/contributor/kolla-for-openstack-development.rst b/doc/source/contributor/kolla-for-openstack-development.rst
index f794f367192a2bca215ba8255e1bd33ceab8088b..db135478feb9ec2e2dfc4b402f0c94fd9a0e42fb 100644
--- a/doc/source/contributor/kolla-for-openstack-development.rst
+++ b/doc/source/contributor/kolla-for-openstack-development.rst
@@ -26,7 +26,7 @@ To enable dev mode for all supported services, set in
 ``/etc/kolla/globals.yml``:
 
 .. path /etc/kolla/globals.yml
-.. code-block:: none
+.. code-block:: yaml
 
    kolla_dev_mode: true
 
@@ -35,7 +35,7 @@ To enable dev mode for all supported services, set in
 To enable it just for heat, set:
 
 .. path /etc/kolla/globals.yml
-.. code-block:: none
+.. code-block:: yaml
 
    heat_dev_mode: true
 
@@ -70,7 +70,7 @@ make sure it is installed in the container in question:
 
 Then, set your breakpoint as follows:
 
-.. code-block:: none
+.. code-block:: python
 
    from remote_pdb import RemotePdb
    RemotePdb('127.0.0.1', 4444).set_trace()
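Once the breakpoint above fires, the process blocks until a client attaches to
the listening socket. A minimal way to attach, assuming port 4444 is reachable
from wherever the command is run (for example inside the same container), is:

.. code-block:: console

   telnet 127.0.0.1 4444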
diff --git a/doc/source/reference/bifrost.rst b/doc/source/reference/bifrost.rst
index 6230ad43a08f64fc9ee9adad16a6574c19f9c070..dab76bb69789d60301d4654155d428efbc476364 100644
--- a/doc/source/reference/bifrost.rst
+++ b/doc/source/reference/bifrost.rst
@@ -91,7 +91,7 @@ resolving the deployment host's hostname to ``127.0.0.1``, for example:
 
 The following lines are desirable for IPv6 capable hosts:
 
-.. code-block:: none
+.. code-block:: console
 
     ::1 ip6-localhost ip6-loopback
     fe00::0 ip6-localnet
@@ -109,12 +109,14 @@ Build a Bifrost Container Image
 This section provides instructions on how to build a container image for
 bifrost using kolla.
 
-Currently kolla only supports the ``source`` install type for the bifrost image.
+Currently kolla only supports the ``source`` install type for the
+bifrost image.
 
 #. To generate kolla-build.conf configuration File
 
 
-   * If required, generate a default configuration file for :command:`kolla-build`:
+   * If required, generate a default configuration file for
+     :command:`kolla-build`:
 
      .. code-block:: console
 
diff --git a/doc/source/reference/central-logging-guide.rst b/doc/source/reference/central-logging-guide.rst
index c227c3e31f34d5094ea11987a840391af0a254c3..4ec165817cb348f8e0583e1288c5643a3d3bfad5 100644
--- a/doc/source/reference/central-logging-guide.rst
+++ b/doc/source/reference/central-logging-guide.rst
@@ -95,7 +95,7 @@ In this output, look for the key ``X-Compute-Request-Id``. This is a unique
 identifier that can be used to track the request through the system. An
 example ID looks like this:
 
-.. code-block:: none
+.. code-block:: console
 
    X-Compute-Request-Id: req-c076b50a-6a22-48bf-8810-b9f41176a6d5
 
diff --git a/doc/source/reference/ceph-guide.rst b/doc/source/reference/ceph-guide.rst
index b00334f4fce423991284347fde9b34b880d2329e..63f95198c3394faa1acdb0d691cd696c76093f64 100644
--- a/doc/source/reference/ceph-guide.rst
+++ b/doc/source/reference/ceph-guide.rst
@@ -99,10 +99,10 @@ To prepare the journal external drive execute the following command:
 Configuration
 ~~~~~~~~~~~~~
 
-Edit the ``[storage]`` group in the inventory which contains the hostname of the
-hosts that have the block devices you have prepped as shown above.
+Edit the ``[storage]`` group in the inventory, which contains the hostnames
+of the hosts that have the block devices you have prepped as shown above.
 
-.. code-block:: none
+.. code-block:: ini
 
    [storage]
    controller
@@ -340,7 +340,7 @@ implement caching.
 Here is the top part of the multinode inventory file used in the example
 environment before adding the 3rd node for Ceph:
 
-.. code-block:: none
+.. code-block:: ini
 
    [control]
    # These hostname must be resolvable from your deployment host
@@ -384,7 +384,7 @@ Next, edit the multinode inventory file and make sure the 3 nodes are listed
 under ``[storage]``. In this example I will add kolla3.ducourrier.com to the
 existing inventory file:
 
-.. code-block:: none
+.. code-block:: ini
 
    [control]
    # These hostname must be resolvable from your deployment host
diff --git a/doc/source/reference/cinder-guide.rst b/doc/source/reference/cinder-guide.rst
index 534dd4c813639e6ea76dd6f43f186dd3926a8abd..ae50271d6b86cede388835caeb94ee2d9b4a7ace 100644
--- a/doc/source/reference/cinder-guide.rst
+++ b/doc/source/reference/cinder-guide.rst
@@ -38,7 +38,7 @@ During development, it may be desirable to use file backed block storage. It
 is possible to use a file and mount it as a block device via the loopback
 system.
 
-.. code-block:: none
+.. code-block:: console
 
    free_device=$(losetup -f)
    fallocate -l 20G /var/lib/cinder_data.img
@@ -67,7 +67,7 @@ NFS
 To use the ``nfs`` backend, configure ``/etc/exports`` to contain the mount
 where the volumes are to be stored:
 
-.. code-block:: none
+.. code-block:: console
 
    /kolla_nfs 192.168.5.0/24(rw,sync,no_root_squash)
 
@@ -89,7 +89,7 @@ Then start ``nfsd``:
 On the deploy node, create ``/etc/kolla/config/nfs_shares`` with an entry for
 each storage node:
 
-.. code-block:: none
+.. code-block:: console
 
    storage01:/kolla_nfs
    storage02:/kolla_nfs
diff --git a/doc/source/reference/external-ceph-guide.rst b/doc/source/reference/external-ceph-guide.rst
index 6f07d7b958a9dd6dae5aef596308fdb852adaf8c..d6b336775c35bf091eb641e17961228404cef010 100644
--- a/doc/source/reference/external-ceph-guide.rst
+++ b/doc/source/reference/external-ceph-guide.rst
@@ -103,7 +103,7 @@ Ceph) into the same directory, for example:
 
 .. end
 
-.. code-block:: none
+.. code-block:: console
 
    $ cat /etc/kolla/config/glance/ceph.client.glance.keyring
 
diff --git a/doc/source/reference/external-mariadb-guide.rst b/doc/source/reference/external-mariadb-guide.rst
index d396479bdc42d821a980a6530524b7cb54754f96..7d1a2989bea3e0953c4f325dd8c611272dbc3df8 100644
--- a/doc/source/reference/external-mariadb-guide.rst
+++ b/doc/source/reference/external-mariadb-guide.rst
@@ -183,8 +183,9 @@ all you need to do is the following steps:
 
    .. end
 
-#. Set the common password for all components within ``/etc/kolla/passwords.yml``.
-   In order to achieve that you could use the following command:
+#. Set the common password for all components within
+   ``/etc/kolla/passwords.yml``. In order to achieve that you
+   could use the following command:
 
    .. code-block:: console
 
diff --git a/doc/source/reference/hyperv-guide.rst b/doc/source/reference/hyperv-guide.rst
index 6ac48860832dee61d42d17d217a0c5b1b70b79e1..026f443bb582c35682f5b01c785dbddc721c40b3 100644
--- a/doc/source/reference/hyperv-guide.rst
+++ b/doc/source/reference/hyperv-guide.rst
@@ -116,7 +116,7 @@ be found on `Cloudbase website
 
 Add the Hyper-V node in ``ansible/inventory`` file:
 
-.. code-block:: none
+.. code-block:: ini
 
    [hyperv]
    <HyperV IP>
diff --git a/doc/source/reference/kuryr-guide.rst b/doc/source/reference/kuryr-guide.rst
index c7a4e9120b20a90d18c4142dc8498ed90cd1a610..12204f62c250b161c55de07541ba7546a6f66064 100644
--- a/doc/source/reference/kuryr-guide.rst
+++ b/doc/source/reference/kuryr-guide.rst
@@ -18,7 +18,7 @@ Preparation and Deployment
 To allow Docker daemon connect to the etcd, add the following in the
 ``docker.service`` file.
 
-.. code-block:: none
+.. code-block:: ini
 
    ExecStart= -H tcp://172.16.1.13:2375 -H unix:///var/run/docker.sock --cluster-store=etcd://172.16.1.13:2379 --cluster-advertise=172.16.1.13:2375
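One way to apply the ``ExecStart`` override above without editing the packaged
unit file is a systemd drop-in. The sketch below assumes ``dockerd`` lives at
``/usr/bin/dockerd`` and reuses the example addresses from this guide; adjust
both for your environment:

.. code-block:: console

   mkdir -p /etc/systemd/system/docker.service.d
   cat > /etc/systemd/system/docker.service.d/kolla.conf <<EOF
   [Service]
   ExecStart=
   ExecStart=/usr/bin/dockerd -H tcp://172.16.1.13:2375 -H unix:///var/run/docker.sock --cluster-store=etcd://172.16.1.13:2379 --cluster-advertise=172.16.1.13:2375
   EOF
   systemctl daemon-reload
   systemctl restart docker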
 
diff --git a/doc/source/reference/manila-guide.rst b/doc/source/reference/manila-guide.rst
index 70dcb0b7cda031daa4fec48a6ac216b676b76d55..15d7532adcf0425e3a3c708515dcd3eeafd0f819 100644
--- a/doc/source/reference/manila-guide.rst
+++ b/doc/source/reference/manila-guide.rst
@@ -369,7 +369,8 @@ Use the manila migration command, as shown in the following example:
 Checking share migration progress
 ---------------------------------
 
-Use the :command:`manila migration-get-progress shareID` command to check progress.
+Use the :command:`manila migration-get-progress shareID` command to
+check progress.
 
 .. code-block:: console
 
diff --git a/doc/source/reference/manila-hnas-guide.rst b/doc/source/reference/manila-hnas-guide.rst
index eb3db506c408b1cd337067b935c1e8a7512a4120..3010def64003c104dde3c8e3124a298f440654a0 100644
--- a/doc/source/reference/manila-hnas-guide.rst
+++ b/doc/source/reference/manila-hnas-guide.rst
@@ -360,4 +360,4 @@ For more information about how to manage shares, see the
 
 For more information about how HNAS driver works, see
 `Hitachi NAS Platform File Services Driver for OpenStack
-<https://docs.openstack.org/manila/latest/admin/hitachi_hnas_driver.html>`__.
\ No newline at end of file
+<https://docs.openstack.org/manila/latest/admin/hitachi_hnas_driver.html>`__.
diff --git a/doc/source/reference/networking-guide.rst b/doc/source/reference/networking-guide.rst
index e4c7d89d1a97d39b39cb5692422af57e3aedbfb4..131e923eca25a71c409a92acd668dbe3a09894bc 100644
--- a/doc/source/reference/networking-guide.rst
+++ b/doc/source/reference/networking-guide.rst
@@ -4,9 +4,9 @@
 Networking in Kolla
 ===================
 
-Kolla deploys Neutron by default as OpenStack networking component. This section
-describes configuring and running Neutron extensions like LBaaS, Networking-SFC,
-QoS, and so on.
+Kolla deploys Neutron by default as the OpenStack networking component.
+This section describes configuring and running Neutron extensions like
+LBaaS, Networking-SFC, QoS, and so on.
 
 Enabling Provider Networks
 ==========================
@@ -218,7 +218,7 @@ it is advised to allocate them via the kernel command line instead to prevent
 memory fragmentation. This can be achieved by adding the following to the grub
 config and regenerating your grub file.
 
-.. code-block:: none
+.. code-block:: console
 
    default_hugepagesz=2M hugepagesz=2M hugepages=25000
 
@@ -233,16 +233,17 @@ While it is technically possible to use all 3 only ``uio_pci_generic`` and
 and distributed as part of the dpdk library. While it has some advantages over
 ``uio_pci_generic`` loading the ``igb_uio`` module will taint the kernel and
 possibly invalidate distro support. To successfully deploy ``ovs-dpdk``,
-``vfio_pci`` or ``uio_pci_generic`` kernel module must be present on the platform.
-Most distros include ``vfio_pci`` or ``uio_pci_generic`` as part of the default
-kernel though on some distros you may need to install ``kernel-modules-extra`` or
-the distro equivalent prior to running :command:`kolla-ansible deploy`.
+the ``vfio_pci`` or ``uio_pci_generic`` kernel module must be present on the
+platform. Most distros include ``vfio_pci`` or ``uio_pci_generic`` as part of
+the default kernel, though on some distros you may need to install
+``kernel-modules-extra`` or the distro equivalent prior to running
+:command:`kolla-ansible deploy`.
 
 Installation
 ------------
 
-To enable ovs-dpdk, add the following configuration to ``/etc/kolla/globals.yml``
-file:
+To enable ovs-dpdk, add the following configuration to
+``/etc/kolla/globals.yml`` file:
 
 .. code-block:: yaml
 
@@ -308,9 +309,10 @@ Modify the ``/etc/kolla/globals.yml`` file as the following example shows:
 
 .. end
 
-Modify the ``/etc/kolla/config/neutron/ml2_conf.ini`` file and add ``sriovnicswitch``
-to the ``mechanism_drivers``. Also, the provider networks used by SRIOV should be configured.
-Both flat and VLAN are configured with the same physical network name in this example:
+Modify the ``/etc/kolla/config/neutron/ml2_conf.ini`` file and add
+``sriovnicswitch`` to the ``mechanism_drivers``. Also, the provider
+networks used by SRIOV should be configured. Both flat and VLAN are configured
+with the same physical network name in this example:
 
 .. path /etc/kolla/config/neutron/ml2_conf.ini
 .. code-block:: ini
@@ -331,9 +333,9 @@ Add ``PciPassthroughFilter`` to scheduler_default_filters
 The ``PciPassthroughFilter``, which is required by Nova Scheduler service
 on the Controller, should be added to ``scheduler_default_filters``
 
-Modify the ``/etc/kolla/config/nova.conf`` file and add ``PciPassthroughFilter``
-to ``scheduler_default_filters``. this filter is required by The Nova Scheduler
-service on the controller node.
+Modify the ``/etc/kolla/config/nova.conf`` file and add
+``PciPassthroughFilter`` to ``scheduler_default_filters``. This filter is
+required by the Nova Scheduler service on the controller node.
 
 .. path /etc/kolla/config/nova.conf
 .. code-block:: ini
@@ -489,12 +491,12 @@ so in environments that have NICs with multiple ports configured for SRIOV,
 it is impossible to specify a specific NIC port to pull VFs from.
 
 Modify the file ``/etc/kolla/config/nova.conf``.  The Nova Scheduler service
-on the control node requires the ``PciPassthroughFilter`` to be added to the list
-of filters and the Nova Compute service(s) on the compute node(s) need PCI
-device whitelisting.  The Nova API service on the control node and the Nova
+on the control node requires the ``PciPassthroughFilter`` to be added to the
+list of filters and the Nova Compute service(s) on the compute node(s) need
+PCI device whitelisting.  The Nova API service on the control node and the Nova
 Compute service on the compute node also require the ``alias`` option under the
-``[pci]`` section.  The alias can be configured as 'type-VF' to pass VFs or 'type-PF'
-to pass the PF. Type-VF is shown in this example:
+``[pci]`` section.  The alias can be configured as 'type-VF' to pass VFs or
+'type-PF' to pass the PF. Type-VF is shown in this example:
 
 .. path /etc/kolla/config/nova.conf
 .. code-block:: ini
@@ -514,8 +516,8 @@ Run deployment.
 Verification
 ------------
 
-Create (or use an existing) flavor, and then configure it to request one PCI device
-from the PCI alias:
+Create (or use an existing) flavor, and then configure it to request one PCI
+device from the PCI alias:
 
 .. code-block:: console
 
@@ -534,4 +536,5 @@ Start a new instance using the flavor:
 Verify VF devices were created and the instance starts successfully as in
 the Neutron SRIOV case.
 
-For more information see `OpenStack PCI passthrough documentation <https://docs.openstack.org/nova/pike/admin/pci-passthrough.html>`_.
\ No newline at end of file
+For more information see `OpenStack PCI passthrough documentation <https://docs.openstack.org/nova/pike/admin/pci-passthrough.html>`_.
+
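For the verification step above, the flavor change usually reduces to a single
property that points at the ``[pci]`` alias. The names ``pci-flavor`` and
``my-alias`` below are placeholders; substitute the alias actually configured
in ``nova.conf``:

.. code-block:: console

   openstack flavor create --ram 4096 --disk 10 --vcpus 2 pci-flavor
   openstack flavor set pci-flavor --property "pci_passthrough:alias"="my-alias:1"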
diff --git a/doc/source/reference/nova-fake-driver.rst b/doc/source/reference/nova-fake-driver.rst
index 8ea3f4dbccca2145f6cbaf83b17acb3dc65ba2fa..33f0e063241349db9afea59c6c2382a33deab083 100644
--- a/doc/source/reference/nova-fake-driver.rst
+++ b/doc/source/reference/nova-fake-driver.rst
@@ -5,10 +5,10 @@ Nova Fake Driver
 ================
 
 One common question from OpenStack operators is that "how does the control
-plane (for example, database, messaging queue, nova-scheduler ) scales?". To answer
-this question, operators setup Rally to drive workload to the OpenStack cloud.
-However, without a large number of nova-compute nodes, it becomes difficult to
-exercise the control performance.
+plane (for example, database, messaging queue, nova-scheduler) scale?".
+To answer this question, operators set up Rally to drive workload to the
+OpenStack cloud. However, without a large number of nova-compute nodes,
+it becomes difficult to exercise the control plane performance.
 
 Given the built-in feature of Docker container, Kolla enables standing up many
 of Compute nodes with nova fake driver on a single host. For example,
@@ -19,9 +19,9 @@ Use nova-fake driver
 ~~~~~~~~~~~~~~~~~~~~
 
 Nova fake driver can not work with all-in-one deployment. This is because the
-fake ``neutron-openvswitch-agent`` for the fake ``nova-compute`` container conflicts
-with ``neutron-openvswitch-agent`` on the Compute nodes. Therefore, in the
-inventory the network node must be different than the Compute node.
+fake ``neutron-openvswitch-agent`` for the fake ``nova-compute`` container
+conflicts with ``neutron-openvswitch-agent`` on the Compute nodes. Therefore,
+in the inventory the network node must be different than the Compute node.
 
 By default, Kolla uses libvirt driver on the Compute node. To use nova-fake
 driver, edit the following parameters in ``/etc/kolla/globals.yml`` or in
@@ -35,5 +35,5 @@ the command line options.
 .. end
 
 Each Compute node will run 5 ``nova-compute`` containers and 5
-``neutron-plugin-agent`` containers. When booting instance, there will be no real
-instances created. But :command:`nova list` shows the fake instances.
+``neutron-plugin-agent`` containers. When booting instances, no real
+instances are created, but :command:`nova list` shows the fake instances.
diff --git a/doc/source/reference/swift-guide.rst b/doc/source/reference/swift-guide.rst
index c340713a7ff4ace5bba5c511e58868b520d2ea56..ee98ed9fa52d3b0be3b0445096bd7865e4443ad0 100644
--- a/doc/source/reference/swift-guide.rst
+++ b/doc/source/reference/swift-guide.rst
@@ -82,7 +82,7 @@ table** example listed above. Please modify accordingly if your setup is
 different.
 
 Prepare for Rings generating
----------------------------- 
+----------------------------
 
 To perpare for Swift Rings generating, run the following commands to initialize
 the environment variable and create ``/etc/kolla/config/swift`` directory:
@@ -251,4 +251,4 @@ A very basic smoke test:
    | Bytes      | 6684                                  |
    | Containers | 1                                     |
    | Objects    | 1                                     |
-   +------------+---------------------------------------+
\ No newline at end of file
+   +------------+---------------------------------------+
diff --git a/doc/source/reference/tacker-guide.rst b/doc/source/reference/tacker-guide.rst
index 15d2a7e903cf47eec3ee46d8e8bd05a26a843f21..c1c9bc8706d1d1482d342af0efb6fa644bee8fa7 100644
--- a/doc/source/reference/tacker-guide.rst
+++ b/doc/source/reference/tacker-guide.rst
@@ -190,4 +190,4 @@ can be cleaned up executing ``cleanup-tacker`` script.
 
    $ sh cleanup-tacker
 
-.. end
\ No newline at end of file
+.. end
diff --git a/doc/source/reference/vmware-guide.rst b/doc/source/reference/vmware-guide.rst
index d77d8b52af8aa5b8db2875d6377ca9b75dda8ac9..737429a5193f01037672b091f3c1a8721fd17898 100644
--- a/doc/source/reference/vmware-guide.rst
+++ b/doc/source/reference/vmware-guide.rst
@@ -61,9 +61,9 @@ For more information, please see `VMware NSX-V documentation <https://docs.vmwar
    In addition, it is important to modify the firewall rule of vSphere to make
    sure that VNC is accessible from outside VMware environment.
 
-   On every VMware host, edit /etc/vmware/firewall/vnc.xml as below:
+   On every VMware host, edit ``/etc/vmware/firewall/vnc.xml`` as below:
 
-.. code-block:: none
+.. code-block:: xml
 
    <!-- FirewallRule for VNC Console -->
    <ConfigRoot>
@@ -216,7 +216,8 @@ Options for Neutron NSX-V support:
 
    .. end
 
-Then you should start :command:`kolla-ansible` deployment normally as KVM/QEMU deployment.
+Then you should start the :command:`kolla-ansible` deployment normally, as
+for a KVM/QEMU deployment.
 
 
 VMware NSX-DVS
@@ -293,7 +294,8 @@ Options for Neutron NSX-DVS support:
 
    .. end
 
-Then you should start :command:`kolla-ansible` deployment normally as KVM/QEMU deployment.
+Then you should start the :command:`kolla-ansible` deployment normally, as
+for a KVM/QEMU deployment.
 
 For more information on OpenStack vSphere, see
 `VMware vSphere
diff --git a/doc/source/reference/zun-guide.rst b/doc/source/reference/zun-guide.rst
index a72aa3b218a616717c6ef5176ea815304e8c677f..d4b65bbf16eeb173124cd8b8a987203ec27b1dca 100644
--- a/doc/source/reference/zun-guide.rst
+++ b/doc/source/reference/zun-guide.rst
@@ -17,7 +17,7 @@ configure kuryr refer to :doc:`kuryr-guide`.
 To allow Zun Compute connect to the Docker Daemon, add the following in the
 ``docker.service`` file on each zun-compute node.
 
-.. code-block:: none
+.. code-block:: ini
 
    ExecStart= -H tcp://<DOCKER_SERVICE_IP>:2375 -H unix:///var/run/docker.sock --cluster-store=etcd://<DOCKER_SERVICE_IP>:2379 --cluster-advertise=<DOCKER_SERVICE_IP>:2375
 
diff --git a/doc/source/user/multi-regions.rst b/doc/source/user/multi-regions.rst
index 73e05d7314653536bccb399c48ed0c59f123a476..ad44a3051420f8ba7225e49a830b82d122967d14 100644
--- a/doc/source/user/multi-regions.rst
+++ b/doc/source/user/multi-regions.rst
@@ -39,7 +39,7 @@ regions. In this example, we consider two regions. The current one,
 formerly knows as RegionOne, that is hided behind
 ``openstack_region_name`` variable, and the RegionTwo:
 
-.. code-block:: none
+.. code-block:: yaml
 
    openstack_region_name: "RegionOne"
    multiple_regions_names:
@@ -69,7 +69,7 @@ update the ``/etc/kolla/globals.yml`` configuration file to tell Kolla how
 to reach Keystone. In the following, ``kolla_internal_fqdn_r1`` refers to
 the value of ``kolla_internal_fqdn`` in RegionOne:
 
-.. code-block:: none
+.. code-block:: yaml
 
    kolla_internal_fqdn_r1: 10.10.10.254
 
@@ -142,7 +142,7 @@ directory, a ``ceilometer.conf`` file with below content:
 And link the directory that contains these files into the
 ``/etc/kolla/globals.yml``:
 
-.. code-block:: none
+.. code-block:: yaml
 
    node_custom_config: path/to/the/directory/of/global&nova_conf/
 
@@ -150,7 +150,7 @@ And link the directory that contains these files into the
 
 Also, change the name of the current region. For instance, RegionTwo:
 
-.. code-block:: none
+.. code-block:: yaml
 
    openstack_region_name: "RegionTwo"
 
@@ -159,7 +159,7 @@ Also, change the name of the current region. For instance, RegionTwo:
 Finally, disable the deployment of Keystone and Horizon that are
 unnecessary in this region and run ``kolla-ansible``:
 
-.. code-block:: none
+.. code-block:: yaml
 
    enable_keystone: "no"
    enable_horizon: "no"
diff --git a/doc/source/user/multinode.rst b/doc/source/user/multinode.rst
index 9705a61d6a42bc816343a05a286aaa717ca74c6b..bff8f430cfb5692596170d75ffa1f8d7f7d030b4 100644
--- a/doc/source/user/multinode.rst
+++ b/doc/source/user/multinode.rst
@@ -24,9 +24,9 @@ Edit the ``/etc/kolla/globals.yml`` and add the following where 192.168.1.100
 is the IP address of the machine and 5000 is the port where the registry is
 currently running:
 
-.. code-block:: none
+.. code-block:: yaml
 
-   docker_registry = 192.168.1.100:5000
+   docker_registry: 192.168.1.100:5000
 
 .. end
 
@@ -185,7 +185,7 @@ controls how ansible interacts with remote hosts.
    information about SSH authentication please reference
    `Ansible documentation <http://docs.ansible.com/ansible/intro_inventory.html>`__.
 
-.. code-block:: none
+.. code-block:: ini
 
    # These initial groups are the only groups required to be modified. The
    # additional groups are for more control of the environment.
@@ -208,7 +208,7 @@ For more advanced roles, the operator can edit which services will be
 associated in with each group. Keep in mind that some services have to be
 grouped together and changing these around can break your deployment:
 
-.. code-block:: none
+.. code-block:: ini
 
    [kibana:children]
    control
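Because ``globals.yml`` is YAML, ``docker_registry`` takes the ``key: value``
form shown earlier in this file. A quick sanity check that the registry at
that address answers, using the example address from this guide, is to query
the registry v2 API:

.. code-block:: console

   curl http://192.168.1.100:5000/v2/_catalog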
diff --git a/doc/source/user/operating-kolla.rst b/doc/source/user/operating-kolla.rst
index bca548d88495a4c6ff0b7a8ffa5bc0611d5911f5..ce870c32a19fdca605df54664fbbeafcaa1497ac 100644
--- a/doc/source/user/operating-kolla.rst
+++ b/doc/source/user/operating-kolla.rst
@@ -72,8 +72,8 @@ While there may be some cases where it is possible to upgrade by skipping this
 step (i.e. by upgrading only the ``openstack_release`` version) - generally,
 when looking at a more comprehensive upgrade, the kolla-ansible package itself
 should be upgraded first. This will include reviewing some of the configuration
-and inventory files. On the operator/master node, a backup of the ``/etc/kolla``
-directory may be desirable.
+and inventory files. On the operator/master node, a backup of the
+``/etc/kolla`` directory may be desirable.
 
 If upgrading from ``5.0.0`` to ``6.0.0``, upgrade the kolla-ansible package:
 
@@ -83,8 +83,8 @@ If upgrading from ``5.0.0`` to ``6.0.0``, upgrade the kolla-ansible package:
 
 .. end
 
-If this is a minor upgrade, and you do not wish to upgrade kolla-ansible itself,
-you may skip this step.
+If this is a minor upgrade, and you do not wish to upgrade kolla-ansible
+itself, you may skip this step.
 
 The inventory file for the deployment should be updated, as the newer sample
 inventory files may have updated layout or other relevant changes.
@@ -101,15 +101,16 @@ In addition the ``6.0.0`` sample configuration files should be taken from::
     # Ubuntu
     /usr/local/share/kolla-ansible/etc_examples/kolla
 
-At this stage, files that are still at the ``5.0.0`` version - which need manual
-updating are:
+At this stage, the files that are still at the ``5.0.0`` version and need
+manual updating are:
 
 - ``/etc/kolla/globals.yml``
 - ``/etc/kolla/passwords.yml``
 
 For ``globals.yml`` relevant changes should be merged into a copy of the new
 template, and then replace the file in ``/etc/kolla`` with the updated version.
-For ``passwords.yml``, see the ``kolla-mergepwd`` instructions in `Tips and Tricks`.
+For ``passwords.yml``, see the ``kolla-mergepwd`` instructions in
+`Tips and Tricks`.
 
 For the kolla docker images, the ``openstack_release`` is updated to ``6.0.0``:
 
diff --git a/doc/source/user/quickstart.rst b/doc/source/user/quickstart.rst
index ed0c01b0c53b833dceab9ebbfd1cf430888d4257..96777cbe9d8bb3276f583b9f64ab5b88a9242e6d 100644
--- a/doc/source/user/quickstart.rst
+++ b/doc/source/user/quickstart.rst
@@ -204,8 +204,8 @@ Install Kolla for development
    .. end
 
 #. Copy the inventory files to the current directory. ``kolla-ansible`` holds
-   inventory files ( ``all-in-one`` and ``multinode``) in the ``ansible/inventory``
-   directory.
+   inventory files (``all-in-one`` and ``multinode``) in the
+   ``ansible/inventory`` directory.
 
    .. code-block:: console
 
@@ -230,7 +230,7 @@ than one node, edit ``multinode`` inventory:
 #. Edit the first section of ``multinode`` with connection details of your
    environment, for example:
 
-   .. code-block:: none
+   .. code-block:: ini
 
       [control]
       10.0.0.[10:12] ansible_user=ubuntu ansible_password=foobar ansible_become=true
diff --git a/doc/source/user/security.rst b/doc/source/user/security.rst
index e5a7c989808bab3019520796e62c465aadd7dafb..e61b2410ba3adb812ccd217c267c96c8641ed46e 100644
--- a/doc/source/user/security.rst
+++ b/doc/source/user/security.rst
@@ -71,8 +71,8 @@ necessary tasks. In Rocky, all services have this capability, so users do not
 need to add ``ansible_become`` option if connection user has passwordless sudo
 capability.
 
-Prior to Rocky, ``ansible_user`` (the user which Ansible uses to connect via SSH)
-is default configuration owner and group in target nodes.
+Prior to Rocky, ``ansible_user`` (the user which Ansible uses to connect
+via SSH) is the default configuration owner and group on target nodes.
 From Rocky release, Kolla support connection using any user which has
-passwordless sudo capability. For setting custom owner user and group, user can
-set ``config_owner_user`` and ``config_owner_group`` in ``globals.yml``
+passwordless sudo capability. To set a custom owner user and group, the user
+can set ``config_owner_user`` and ``config_owner_group`` in ``globals.yml``.
diff --git a/setup.cfg b/setup.cfg
index 27f087dba910ed6108727e13573cb8f6a517a057..c4c664239bade9ca8926a5c84cecf7fce24223ef 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -49,14 +49,6 @@ console_scripts =
 setup-hooks =
     pbr.hooks.setup_hook
 
-[pbr]
-
-[build_sphinx]
-all_files = 1
-build-dir = doc/build
-source-dir = doc/source
-warning-is-error = 1
-
 [build_releasenotes]
 all_files = 1
 build-dir = releasenotes/build
diff --git a/test-requirements.txt b/test-requirements.txt
index 2dbc440869d220a022e864352dabdc7f24258687..f242c770484aa1baf054a82b2f8a9c9fbf7784ed 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -11,14 +11,12 @@ hacking>=0.10.0,<1.1.0
 openstackdocstheme>=1.18.1 # Apache-2.0
 oslo.log>=3.36.0 # Apache-2.0
 oslotest>=3.2.0 # Apache-2.0
-reno>=2.5.0 # Apache-2.0
 PrettyTable<0.8,>=0.7.1 # BSD
 PyYAML>=3.12 # MIT
 python-ceilometerclient>=2.5.0 # Apache-2.0
 python-neutronclient>=6.7.0 # Apache-2.0
 python-openstackclient>=3.12.0 # Apache-2.0
 pytz>=2013.6 # MIT
-sphinx!=1.6.6,!=1.6.7,>=1.6.2 # BSD
 testrepository>=0.0.18 # Apache-2.0/BSD
 testscenarios>=0.4 # Apache-2.0/BSD
 testtools>=2.2.0 # MIT
diff --git a/tox.ini b/tox.ini
index d24f265c559a1f86a3a74a6447870c3f0f1d3b1a..44fdaebc8fbe7bb67e67a8f1e063c7851c3a5ebe 100644
--- a/tox.ini
+++ b/tox.ini
@@ -7,8 +7,9 @@ envlist = py35,py27,pep8,pypy
 usedevelop=True
 whitelist_externals = find
                       rm
-install_command = pip install -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt} {opts} {packages}
-deps = -r{toxinidir}/requirements.txt
+install_command = pip install {opts} {packages}
+deps = -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt}
+       -r{toxinidir}/requirements.txt
        -r{toxinidir}/test-requirements.txt
 passenv = http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY
           OS_STDOUT_CAPTURE OS_STDERR_CAPTURE OS_LOG_CAPTURE OS_TEST_TIMEOUT
@@ -30,12 +31,15 @@ setenv = VIRTUAL_ENV={envdir}
 commands = python setup.py testr --coverage --testr-args='{posargs}'
 
 [testenv:pep8]
+# Sphinx needs to be installed for doc8 to work properly
 deps =
     {[testenv]deps}
+    -r{toxinidir}/doc/requirements.txt
     yamllint
 commands =
   {toxinidir}/tools/run-bashate.sh
   flake8 {posargs}
+  doc8 doc
   python {toxinidir}/tools/validate-all-file.py
   bandit -r ansible kolla_ansible tests tools
   yamllint .
@@ -44,16 +48,30 @@ commands =
 commands = bandit -r ansible kolla_ansible tests tools
 
 [testenv:venv]
+deps =
+  -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt}
+  -r{toxinidir}/test-requirements.txt
+  -r{toxinidir}/doc/requirements.txt
 commands = {posargs}
 
 [testenv:docs]
+deps =
+   -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt}
+   -r{toxinidir}/requirements.txt
+   -r{toxinidir}/doc/requirements.txt
 commands =
   rm -rf doc/build
-  doc8 doc
-  python setup.py build_sphinx
+  sphinx-build -W -b html doc/source doc/build/html
 
 [testenv:deploy-guide]
-commands = sphinx-build -a -E -W -d deploy-guide/build/doctrees -b html deploy-guide/source deploy-guide/build/html
+deps =
+   -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt}
+   -r{toxinidir}/requirements.txt
+   -r{toxinidir}/doc/requirements.txt
+
+commands =
+  rm -rf deploy-guide/build
+  sphinx-build -a -E -W -d deploy-guide/build/doctrees -b html deploy-guide/source deploy-guide/build/html
 
 [testenv:setupenv]
 commands =
@@ -61,6 +79,10 @@ commands =
   {toxinidir}/tools/dump_info.sh
 
 [testenv:releasenotes]
+deps =
+   -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt}
+   -r{toxinidir}/requirements.txt
+   -r{toxinidir}/doc/requirements.txt
 commands =
   rm -rf releasenotes/build
   sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html
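With the documentation dependencies split out into ``doc/requirements.txt``,
the affected environments can be exercised locally as sketched below, assuming
``tox`` is installed:

.. code-block:: console

   tox -e docs
   tox -e deploy-guide
   tox -e releasenotes
   tox -e pep8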