    Reduce the use of SQLAlchemy connection pooling · c8177202
    Pierre Riteau authored
    When the internal VIP is moved in the event of a failure of the active
    controller, OpenStack services can become unresponsive as they try to
    talk with MariaDB using connections from the SQLAlchemy pool.
    
    It has been argued that OpenStack doesn't really need to use connection
    pooling with MariaDB [1]. This commit reduces the use of connection
    pooling via two configuration options:
    
    - max_pool_size is set to 1 to allow only a single connection in the
      pool (it is not possible to disable connection pooling entirely via
      oslo.db, and max_pool_size = 0 means unlimited pool size)
    - connection_recycle_time is lowered from the default of one hour to 10
      seconds, which means the single connection in the pool will be
      recreated regularly
    
    These settings have been shown to make the system more responsive in the
    event of a failover.
    
    [1] http://lists.openstack.org/pipermail/openstack-dev/2015-April/061808.html
    
    Change-Id: Ib6a62d4428db9b95569314084090472870417f3d
    Closes-Bug: #1896635
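
    For illustration, with the values introduced by this change
    (connection_recycle_time = 10, max_pool_size = 1), the [database]
    section rendered from the heat.conf.j2 template below would look
    roughly like this; the database host, user, and password here are
    placeholders, not values taken from this change:

        [database]
        connection = mysql+pymysql://heat:heat_db_password@192.0.2.10/heat
        connection_recycle_time = 10
        max_pool_size = 1
        max_retries = -1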
heat.conf.j2
[DEFAULT]
debug = {{ heat_logging_debug }}

log_dir = /var/log/kolla/heat
log_file = $log_dir/{{ service_name }}.log
heat_metadata_server_url = {{ heat_cfn_public_base_endpoint }}
heat_waitcondition_server_url = {{ heat_cfn_public_base_endpoint }}/v1/waitcondition

heat_stack_user_role = {{ heat_stack_user_role }}

stack_domain_admin = heat_domain_admin
stack_domain_admin_password = {{ heat_domain_admin_password }}
stack_user_domain_name = heat_user_domain

{% if service_name == 'heat-engine' %}
num_engine_workers = {{ openstack_service_workers }}
{% endif %}

transport_url = {{ rpc_transport_url }}

region_name_for_services = {{ openstack_region_name }}

server_keystone_endpoint_type = public

{% if service_name == 'heat-api' %}
[heat_api]
bind_host = {{ api_interface_address }}
bind_port = {{ heat_api_listen_port }}
workers = {{ openstack_service_workers }}
{% endif %}

{% if service_name == 'heat-api-cfn' %}
[heat_api_cfn]
bind_host = {{ api_interface_address }}
bind_port = {{ heat_api_cfn_listen_port }}
workers = {{ openstack_service_workers }}
{% endif %}

[database]
connection = mysql+pymysql://{{ heat_database_user }}:{{ heat_database_password }}@{{ heat_database_address }}/{{ heat_database_name }}
connection_recycle_time = {{ database_connection_recycle_time }}
max_pool_size = {{ database_max_pool_size }}
max_retries = -1

[keystone_authtoken]
www_authenticate_uri = {{ keystone_internal_url }}
auth_url = {{ keystone_admin_url }}
auth_type = password
project_domain_id = {{ default_project_domain_id }}
user_domain_id = {{ default_user_domain_id }}
project_name = service
username = {{ heat_keystone_user }}
password = {{ heat_keystone_password }}
cafile = {{ openstack_cacert }}

memcache_security_strategy = ENCRYPT
memcache_secret_key = {{ memcache_secret_key }}
memcached_servers = {% for host in groups['memcached'] %}{{ 'api' | kolla_address(host) | put_address_in_context('memcache') }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}


[cache]
backend = oslo_cache.memcache_pool
enabled = True
memcache_servers = {% for host in groups['memcached'] %}{{ 'api' | kolla_address(host) | put_address_in_context('memcache') }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}


[trustee]
auth_uri = {{ keystone_internal_url }}
auth_url = {{ keystone_admin_url }}
auth_type = password
user_domain_id = {{ default_user_domain_id }}
username = {{ heat_keystone_user }}
password = {{ heat_keystone_password }}

[ec2authtoken]
auth_uri = {{ keystone_internal_url }}/v3

[oslo_messaging_notifications]
transport_url = {{ notify_transport_url }}
{% if heat_enabled_notification_topics %}
driver = messagingv2
topics = {{ heat_enabled_notification_topics | map(attribute='name') | join(',') }}
{% else %}
driver = noop
{% endif %}

{% if heat_policy_file is defined %}
[oslo_policy]
policy_file = {{ heat_policy_file }}
{% endif %}

[clients]
endpoint_type = internalURL
ca_file = {{ openstack_cacert }}

[oslo_middleware]
enable_proxy_headers_parsing = True

{% if enable_osprofiler | bool %}
[profiler]
enabled = true
trace_sqlalchemy = true
hmac_keys = {{ osprofiler_secret }}
connection_string = {{ osprofiler_backend_connection_string }}
{% endif %}