# "sriovnet1" and tunnel_interface used here as placeholders
neutron_sriov_physnet_mappings:
  sriovnet1: "{{ tunnel_interface }}"
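# For example (hypothetical physnet and interface names), a host with two
# SR-IOV capable NICs could map each physical network to its own interface:
# neutron_sriov_physnet_mappings:
#   sriovnet1: "ens2f0"
#   sriovnet2: "ens2f1"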
neutron_enable_tls_backend: "{{ kolla_enable_tls_backend }}"
#######################
# Nova options
#######################
nova_backend: "{{ 'rbd' if nova_backend_ceph | bool else 'default' }}"
# Valid options are [ kvm, qemu, vmware ]
nova_compute_virt_type: "kvm"
nova_instance_datadir_volume: "nova_compute"
# Valid options are [ none, novnc, spice ]
nova_console: "novnc"
#######################
# Murano options
#######################
murano_agent_rabbitmq_vhost: "muranoagent"
murano_agent_rabbitmq_user: "muranoagent"
#######################
# Horizon options
#######################
horizon_backend_database: "{{ enable_murano | bool }}"
# Enable deploying custom horizon policy files for services that don't have a
# horizon plugin but have a policy file. Override these when you have services
# not deployed by kolla-ansible but want custom policy files deployed for them
# in horizon.
enable_ceilometer_horizon_policy_file: "{{ enable_ceilometer }}"
enable_cinder_horizon_policy_file: "{{ enable_cinder }}"
enable_glance_horizon_policy_file: "{{ enable_glance }}"
enable_heat_horizon_policy_file: "{{ enable_heat }}"
enable_keystone_horizon_policy_file: "{{ enable_keystone }}"
enable_neutron_horizon_policy_file: "{{ enable_neutron }}"
enable_nova_horizon_policy_file: "{{ enable_nova }}"
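# For example, if Cinder runs outside of this deployment (enable_cinder is
# "no") but you still want its policy file shipped to Horizon, an override
# such as the following (illustrative only) can be set in globals.yml:
# enable_cinder_horizon_policy_file: "yes"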
horizon_enable_tls_backend: "{{ kolla_enable_tls_backend }}"
horizon_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ horizon_tls_port if kolla_enable_tls_internal | bool else horizon_port }}"
horizon_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn | put_address_in_context('url') }}:{{ horizon_tls_port if kolla_enable_tls_external | bool else horizon_port }}"
# External Ceph - cephx auth is enabled by default (this is the standard nowadays)
external_ceph_cephx_enabled: "yes"
ceph_cinder_pool_name: "volumes"
ceph_cinder_backup_pool_name: "backups"
ceph_glance_pool_name: "images"
ceph_cinder_backup_user: "cinder-backup"
ceph_cinder_user: "cinder"
ceph_glance_user: "glance"
ceph_gnocchi_user: "gnocchi"
ceph_manila_user: "manila"
# External Ceph keyrings
ceph_cinder_keyring: "ceph.client.cinder.keyring"
ceph_cinder_backup_keyring: "ceph.client.cinder-backup.keyring"
ceph_glance_keyring: "ceph.client.glance.keyring"
ceph_gnocchi_keyring: "ceph.client.gnocchi.keyring"
ceph_manila_keyring: "ceph.client.manila.keyring"
####################
# VMware support
####################
vmware_vcenter_host_ip: "127.0.0.1"
vmware_vcenter_host_username: "username"
vmware_vcenter_cluster_name: "cluster-1"
vmware_vcenter_insecure: "True"
#############################################
# MariaDB component-specific database details
#############################################
# Whether to configure haproxy to load balance
# the external MariaDB server(s)
enable_external_mariadb_load_balancer: "no"
# Whether to use pre-configured databases / users
use_preconfigured_databases: "no"
# Whether to use a common, preconfigured user
# for all component databases
use_common_mariadb_user: "no"
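# For example, to reuse databases and a shared database account that were
# created ahead of time (illustrative override; the credentials must already
# exist on the external MariaDB server):
# use_preconfigured_databases: "yes"
# use_common_mariadb_user: "yes"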
############
# Prometheus
############
enable_prometheus_server: "{{ enable_prometheus | bool }}"
enable_prometheus_haproxy_exporter: "{{ enable_haproxy | bool }}"
enable_prometheus_mysqld_exporter: "{{ enable_mariadb | bool }}"
enable_prometheus_node_exporter: "{{ enable_prometheus | bool }}"
enable_prometheus_memcached_exporter: "{{ enable_memcached | bool }}"
enable_prometheus_cadvisor: "{{ enable_prometheus | bool }}"
enable_prometheus_fluentd_integration: "{{ enable_prometheus | bool and enable_fluentd | bool }}"
enable_prometheus_alertmanager: "{{ enable_prometheus | bool }}"
enable_prometheus_openstack_exporter: "{{ enable_prometheus | bool }}"
enable_prometheus_elasticsearch_exporter: "{{ enable_prometheus | bool and enable_elasticsearch | bool }}"
enable_prometheus_blackbox_exporter: "{{ enable_prometheus | bool }}"
enable_prometheus_rabbitmq_exporter: "{{ enable_prometheus | bool and enable_rabbitmq | bool }}"
enable_prometheus_libvirt_exporter: "{{ enable_prometheus | bool and enable_nova | bool and nova_compute_virt_type in ['kvm', 'qemu'] }}"
enable_prometheus_etcd_integration: "{{ enable_prometheus | bool and enable_etcd | bool }}"
prometheus_alertmanager_user: "admin"
prometheus_openstack_exporter_interval: "60s"
prometheus_openstack_exporter_timeout: "10s"
prometheus_elasticsearch_exporter_interval: "60s"
prometheus_openstack_exporter_endpoint_type: "internal"
prometheus_libvirt_exporter_interval: "60s"
############
# Vitrage
############
enable_vitrage_prometheus_datasource: "{{ enable_prometheus | bool }}"
####################
# InfluxDB options
####################
influxdb_address: "{{ kolla_internal_fqdn }}"
influxdb_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ influxdb_http_port }}"
#################
# Kafka options
#################
kafka_datadir_volume: "kafka"
# The number of brokers in a Kafka cluster. This is used for automatically
# setting quantities such as topic replicas and it is not recommended to
# change it unless you know what you are doing.
kafka_broker_count: "{{ groups['kafka'] | length }}"
#########################
# Internal Image options
#########################
distro_python_version_map: {
}
distro_python_version: "{{ distro_python_version_map[kolla_base_distro] }}"
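# For example (illustrative value only), the interpreter version is looked up
# by kolla_base_distro, so an override in globals.yml might look like:
# distro_python_version_map:
#   ubuntu: "3.8"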
##########
# Telegraf
##########
# Configure telegraf to use the docker daemon itself as an input for
# telemetry data.
telegraf_enable_docker_input: "no"
vitrage_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ vitrage_api_port }}"
vitrage_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn | put_address_in_context('url') }}:{{ vitrage_api_port }}"
####################
# Grafana
####################
grafana_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ grafana_server_port }}"
grafana_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn | put_address_in_context('url') }}:{{ grafana_server_port }}"
#############
# Ironic
#############
ironic_internal_endpoint: "{{ internal_protocol }}://{{ ironic_internal_fqdn | put_address_in_context('url') }}:{{ ironic_api_port }}"
ironic_public_endpoint: "{{ public_protocol }}://{{ ironic_external_fqdn | put_address_in_context('url') }}:{{ ironic_api_port }}"
########
# Swift
########
swift_internal_base_endpoint: "{{ internal_protocol }}://{{ swift_internal_fqdn | put_address_in_context('url') }}:{{ swift_proxy_server_port }}"
swift_internal_endpoint: "{{ swift_internal_base_endpoint }}/v1/AUTH_%(tenant_id)s"
swift_public_endpoint: "{{ public_protocol }}://{{ swift_external_fqdn | put_address_in_context('url') }}:{{ swift_proxy_server_port }}/v1/AUTH_%(tenant_id)s"
##########
# Octavia
##########
# Whether to run Kolla-Ansible's automatic configuration for Octavia.
# NOTE: if you upgrade from Ussuri, you must set `octavia_auto_configure` to `no`
# and keep the rest of your Octavia configuration as it was before.
octavia_auto_configure: yes
# Octavia network type options are [ tenant, provider ]
# * tenant indicates that we will create a tenant network and a network
# interface on the Octavia worker nodes for communication with amphorae.
# * provider indicates that we will create a flat or vlan provider network.
# In this case octavia_network_interface should be set to a network interface
# on the Octavia worker nodes on the same provider network.
octavia_network_type: "provider"
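# For example (hypothetical interface name), with the provider network type you
# would also point octavia_network_interface at the interface attached to the
# provider network on the worker nodes:
# octavia_network_interface: "eno2"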
octavia_internal_endpoint: "{{ internal_protocol }}://{{ octavia_internal_fqdn | put_address_in_context('url') }}:{{ octavia_api_port }}"
octavia_public_endpoint: "{{ public_protocol }}://{{ octavia_external_fqdn | put_address_in_context('url') }}:{{ octavia_api_port }}"
###################################
# Identity federation configuration
###################################
# Here we configure the metadata of all of the IdPs that is required to
# implement identity federation with OpenStack Keystone.
# The administrator must provide the following metadata:
# * name (the internal name of the IdP in Keystone);
# * openstack_domain (the Keystone domain that the IdP belongs to);
# * protocol (the federated protocol used by the IdP; e.g. openid or saml);
# * identifier (the IdP identifier; e.g. https://accounts.google.com);
# * public_name (the public name shown to users in Horizon);
# * attribute_mapping (the attribute mapping to be used for this IdP. This
#   mapping is configured in the "keystone_identity_mappings" option);
# * metadata_folder (the folder containing all of the identity provider
#   metadata as JSON files, named after the identifier without the protocol
#   and with '/' escaped as %2F, followed by '.provider', '.client' or '.conf';
#   e.g. accounts.google.com.provider. Note that all of the .conf, .provider
#   and .client JSON files must be present in the folder; even if you do not
#   override anything in the .conf JSON, you must leave it as an empty JSON
#   object '{}');
# * certificate_file (the path to the identity provider certificate file. The
#   file must be named as 'certificate-key-id.pem', e.g.
#   LRVweuT51StjMdsna59jKfB3xw0r8Iz1d1J1HeAbmlw.pem. You can find the key-id in
#   the identity provider's '.well-known/openid-configuration' jwks_uri as the
#   kid field);
#
# The IdP metadata is presented to Kolla-Ansible as in the following example:
# keystone_identity_providers:
#   - name: "myidp1"
#     openstack_domain: "my-domain"
#     protocol: "openid"
#     identifier: "https://accounts.google.com"
#     public_name: "Authenticate via myidp1"
#     attribute_mapping: "mappingId1"
#     metadata_folder: "path/to/metadata/folder"
#     certificate_file: "path/to/certificate/file.pem"
#
# We also need to configure the attribute mappings that are used by the IdPs.
# The configuration of attribute mappings is a list of objects, where each
# object must have a 'name' (that maps to the 'attribute_mapping' of the IdP
# object in the IdPs set), and a 'file' with the fully qualified path to a
# mapping file.
# keystone_identity_mappings:
#   - name: "mappingId1"
#     file: "/full/qualified/path/to/mapping/json/file/to/mappingId1"
#   - name: "mappingId2"
#     file: "/full/qualified/path/to/mapping/json/file/to/mappingId2"
#   - name: "mappingId3"
#     file: "/full/qualified/path/to/mapping/json/file/to/mappingId3"
keystone_identity_providers: []
keystone_identity_mappings: []
####################
# Corosync options
####################
# This is a UDP port
hacluster_corosync_port: 5405