Skip to content
Snippets Groups Projects
Commit 99c7eb19 authored by Sam Yaple's avatar Sam Yaple
Browse files

Make OpenStack use Ceph

This implements all the openstack pieces needed to make ceph work.

DocImpact
Change-Id: I1d24476a966602cf955e5ef872b0efb01319894a
Partially-Implements: blueprint ceph-container
Implements: blueprint kolla-live-migration
parent a1b0518b
No related branches found
No related tags found
No related merge requests found
Showing
with 333 additions and 10 deletions
......@@ -163,4 +163,4 @@ haproxy_user: "openstack"
#################################
# Cinder - Block Storage options
#################################
cinder_volume_driver: "lvm"
cinder_volume_driver: "{{ 'ceph' if enable_ceph | bool else 'lvm' }}"
---
# Role metadata: pull in the common role, rendering the cinder project
# configuration (cinder.yml) as part of the shared config machinery.
dependencies:
  - role: common
    project_yaml: 'cinder.yml'
---
# Prepare Ceph integration for Cinder: create the config directories on the
# cinder hosts, create the Ceph pools (volumes/backups) once on a monitor,
# and distribute cephx keyrings to the cinder-volume / cinder-backup services.

- name: Ensuring config directory exists
  file:
    path: "{{ node_config_directory }}/{{ item }}"
    state: "directory"
  with_items:
    - "cinder-volume"
    - "cinder-backup"
  when: inventory_hostname in groups['cinder-volume']

- name: Copying over config(s)
  template:
    src: roles/ceph/templates/ceph.conf.j2
    dest: "{{ node_config_directory }}/{{ item }}/ceph.conf"
  with_items:
    - "cinder-volume"
    - "cinder-backup"
  when: inventory_hostname in groups['cinder-volume']

# NOTE(review): dropped "-it" from docker exec below. "-i/-t" request an
# interactive TTY, which Ansible's command module does not provide, so
# "docker exec -it" fails ("the input device is not a TTY") when run
# non-interactively. Plain "docker exec" is correct here.
# TODO(SamYaple): Improve failed_when and changed_when tests
- name: Creating ceph pool for cinder
  # pg_num of 128 is hard-coded; failed_when is disabled because the pool
  # may already exist on reruns.
  command: docker exec ceph_mon ceph osd pool create volumes 128
  delegate_to: "{{ groups['ceph-mon'][0] }}"
  changed_when: False
  failed_when: False
  run_once: True

# TODO(SamYaple): Improve failed_when and changed_when tests
- name: Creating ceph pool for cinder-backup
  command: docker exec ceph_mon ceph osd pool create backups 128
  delegate_to: "{{ groups['ceph-mon'][0] }}"
  changed_when: False
  failed_when: False
  run_once: True

# TODO(SamYaple): Improve failed_when and changed_when tests
- name: Pulling cephx keyring for cinder
  # get-or-create is idempotent: it returns the existing keyring on reruns.
  command: docker exec ceph_mon ceph auth get-or-create client.cinder mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rwx pool=vms, allow rx pool=images'
  register: cephx_key_cinder
  delegate_to: "{{ groups['ceph-mon'][0] }}"
  changed_when: False
  run_once: True

# TODO(SamYaple): Improve failed_when and changed_when tests
- name: Pulling cephx keyring for cinder-backup
  command: docker exec ceph_mon ceph auth get-or-create client.cinder-backup mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=backups'
  register: cephx_key_cinder_backup
  delegate_to: "{{ groups['ceph-mon'][0] }}"
  changed_when: False
  run_once: True

- name: Pushing cephx keyring
  copy:
    # NOTE(review): terminate with a plain "\n"; the original appended
    # "\n\r", leaving a stray carriage return in the keyring file.
    content: "{{ item.content }}\n"
    dest: "{{ node_config_directory }}/{{ item.service_name }}/ceph.client.{{ item.key_name }}.keyring"
    mode: "0600"
  with_items:
    - { service_name: "cinder-volume", key_name: "cinder", content: "{{ cephx_key_cinder.stdout }}" }
    - { service_name: "cinder-backup", key_name: "cinder-backup", content: "{{ cephx_key_cinder_backup.stdout }}" }
  when: inventory_hostname in groups['cinder-volume']
---
# Entry point for the cinder role: optionally prepare Ceph, then register,
# configure, bootstrap and start the cinder services on hosts that belong to
# any cinder group.

# Ceph pools/keyrings are only needed when Ceph integration is enabled.
- include: ceph.yml
  when: enable_ceph | bool

# All remaining steps run on any host carrying a cinder service.
- include: register.yml
  when: inventory_hostname in groups['cinder-api'] or
        inventory_hostname in groups['cinder-volume'] or
        inventory_hostname in groups['cinder-scheduler'] or
        inventory_hostname in groups['cinder-backup']

- include: config.yml
  when: inventory_hostname in groups['cinder-api'] or
        inventory_hostname in groups['cinder-volume'] or
        inventory_hostname in groups['cinder-scheduler'] or
        inventory_hostname in groups['cinder-backup']

- include: bootstrap.yml
  when: inventory_hostname in groups['cinder-api'] or
        inventory_hostname in groups['cinder-volume'] or
        inventory_hostname in groups['cinder-scheduler'] or
        inventory_hostname in groups['cinder-backup']

- include: start.yml
  when: inventory_hostname in groups['cinder-api'] or
        inventory_hostname in groups['cinder-volume'] or
        inventory_hostname in groups['cinder-scheduler'] or
        inventory_hostname in groups['cinder-backup']
......@@ -6,6 +6,18 @@
"dest": "/etc/cinder/cinder.conf",
"owner": "cinder",
"perm": "0600"
}
}{% if enable_ceph | bool %},
{
"source": "/opt/kolla/config_files/ceph.client.cinder.keyring",
"dest": "/etc/ceph/ceph.client.cinder.keyring",
"owner": "cinder",
"perm": "0600"
},
{
"source": "/opt/kolla/config_files/ceph.conf",
"dest": "/etc/ceph/ceph.conf",
"owner": "cinder",
"perm": "0600"
}{% endif %}
]
}
[DEFAULT]
verbose = true
debug = true
use_syslog = True
syslog_log_facility = LOG_LOCAL0
enable_v1_api=false
volume_name_template = %s
glance_api_servers = http://{{ kolla_internal_address }}:{{ glance_api_port }}
glance_api_version = 2
os_region_name = {{ openstack_region_name }}
{% if cinder_volume_driver == "lvm" %}
default_volume_type = lvmdriver-1
enabled_backends = lvmdriver-1
......@@ -14,10 +20,24 @@ enabled_backends = lvmdriver-1
default_volume_type = rbd-1
enabled_backends = rbd-1
{% endif %}
{% if service_name == "cinder-backup" and cinder_volume_driver == "ceph" %}
backup_driver = cinder.backup.drivers.ceph
backup_ceph_conf = /etc/ceph/ceph.conf
backup_ceph_user = cinder-backup
backup_ceph_chunk_size = 134217728
backup_ceph_pool = backups
backup_ceph_stripe_unit = 0
backup_ceph_stripe_count = 0
restore_discard_excess_bytes = true
{% endif %}
osapi_volume_listen = {{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}
osapi_volume_listen_port = {{ cinder_api_port }}
api_paste_config = /etc/cinder/api-paste.ini
nova_catalog_info = compute:nova:internalURL
auth_strategy = keystone
[database]
......@@ -52,7 +72,12 @@ volume_backend_name = lvmdriver-1
[rbd-1]
volume_driver = cinder.volume.drivers.rbd.RBDDriver
rbd_pool = volumes
rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_flatten_volume_from_snapshot = false
rbd_max_clone_depth = 5
rbd_store_chunk_size = 4
rados_connect_timeout = -1
rbd_user = cinder
rbd_secret_uuid = {{ metadata_secret }}
rbd_secret_uuid = {{ rbd_secret_uuid }}
{% endif %}
......@@ -2,4 +2,3 @@
- include: config.yml
- include: start.yml
---
dependencies:
- { role: common }
- { role: common, project_yaml: 'glance.yml' }
---
# Prepare Ceph integration for Glance: create the glance-api config
# directory, create the "images" pool once on a monitor, and distribute the
# glance cephx keyring.

- name: Ensuring config directory exists
  file:
    path: "{{ node_config_directory }}/glance-api"
    state: "directory"
  when: inventory_hostname in groups['glance-api']

- name: Copying over config(s)
  template:
    src: roles/ceph/templates/ceph.conf.j2
    dest: "{{ node_config_directory }}/glance-api/ceph.conf"
  when: inventory_hostname in groups['glance-api']

# NOTE(review): dropped "-it" from docker exec below. "-i/-t" request an
# interactive TTY, which Ansible's command module does not provide, so
# "docker exec -it" fails when run non-interactively.
# TODO(SamYaple): Improve failed_when and changed_when tests
- name: Creating ceph pool for glance
  # failed_when is disabled because the pool may already exist on reruns.
  command: docker exec ceph_mon ceph osd pool create images 128
  delegate_to: "{{ groups['ceph-mon'][0] }}"
  changed_when: False
  failed_when: False
  run_once: True

# TODO(SamYaple): Improve failed_when and changed_when tests
- name: Pulling cephx keyring
  # get-or-create is idempotent: it returns the existing keyring on reruns.
  command: docker exec ceph_mon ceph auth get-or-create client.glance mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=images'
  register: cephx_key
  delegate_to: "{{ groups['ceph-mon'][0] }}"
  changed_when: False
  run_once: True

- name: Pushing cephx keyring
  copy:
    # NOTE(review): terminate with a plain "\n"; the original appended
    # "\n\r", leaving a stray carriage return in the keyring file.
    content: "{{ cephx_key.stdout }}\n"
    dest: "{{ node_config_directory }}/glance-api/ceph.client.glance.keyring"
    mode: "0600"
  when: inventory_hostname in groups['glance-api']
---
# Entry point for the glance role: optionally prepare Ceph, then register,
# configure, bootstrap and start the glance services on hosts that belong to
# either glance group.

# Ceph pool/keyring setup is only needed when Ceph integration is enabled.
- include: ceph.yml
  when: enable_ceph | bool

- include: register.yml
  when: inventory_hostname in groups['glance-api'] or
        inventory_hostname in groups['glance-registry']

- include: config.yml
  when: inventory_hostname in groups['glance-api'] or
        inventory_hostname in groups['glance-registry']

- include: bootstrap.yml
  when: inventory_hostname in groups['glance-api'] or
        inventory_hostname in groups['glance-registry']

- include: start.yml
  when: inventory_hostname in groups['glance-api'] or
        inventory_hostname in groups['glance-registry']
......@@ -12,6 +12,10 @@ registry_host = {{ kolla_internal_address }}
use_syslog = True
syslog_log_facility = LOG_LOCAL0
{% if enable_ceph | bool %}
show_image_direct_url= True
{% endif %}
[database]
connection = mysql://{{ glance_database_user }}:{{ glance_database_password }}@{{ glance_database_address }}/{{ glance_database_name }}
......@@ -29,5 +33,13 @@ password = {{ glance_keystone_password }}
flavor = keystone
[glance_store]
{% if enable_ceph | bool %}
default_store = rbd
stores = rbd
rbd_store_user = glance
rbd_store_pool = images
rbd_store_chunk_size = 8
{% else %}
default_store = file
filesystem_store_datadir = /var/lib/glance/images/
{% endif %}
......@@ -6,6 +6,18 @@
"dest": "/etc/glance/glance-api.conf",
"owner": "glance",
"perm": "0600"
}
}{% if enable_ceph | bool %},
{
"source": "/opt/kolla/config_files/ceph.client.glance.keyring",
"dest": "/etc/ceph/ceph.client.glance.keyring",
"owner": "glance",
"perm": "0600"
},
{
"source": "/opt/kolla/config_files/ceph.conf",
"dest": "/etc/ceph/ceph.conf",
"owner": "glance",
"perm": "0600"
}{% endif %}
]
}
---
dependencies:
- { role: common }
- { role: common, project_yaml: 'nova.yml' }
---
# Prepare Ceph integration for Nova: create config directories on compute
# hosts (including the libvirt secrets directory), create the "vms" pool
# once on a monitor, and distribute the nova cephx keyring plus the libvirt
# secret definition/key used for RBD access.

- name: Ensuring config directory exists
  # "nova-libvirt/secrets" also creates the parent "nova-libvirt" directory.
  file:
    path: "{{ node_config_directory }}/{{ item }}"
    state: "directory"
  with_items:
    - "nova-compute"
    - "nova-libvirt/secrets"
  when: inventory_hostname in groups['compute']

- name: Copying over config(s)
  template:
    src: roles/ceph/templates/ceph.conf.j2
    dest: "{{ node_config_directory }}/{{ item }}/ceph.conf"
  with_items:
    - "nova-compute"
    - "nova-libvirt"
  when: inventory_hostname in groups['compute']

# NOTE(review): dropped "-it" from docker exec below. "-i/-t" request an
# interactive TTY, which Ansible's command module does not provide, so
# "docker exec -it" fails when run non-interactively.
# TODO(SamYaple): Improve failed_when and changed_when tests
- name: Creating ceph pool for vms
  # failed_when is disabled because the pool may already exist on reruns.
  command: docker exec ceph_mon ceph osd pool create vms 128
  delegate_to: "{{ groups['ceph-mon'][0] }}"
  changed_when: False
  failed_when: False
  run_once: True

# TODO(SamYaple): Improve failed_when and changed_when tests
- name: Pulling cephx keyring for nova
  # get-or-create is idempotent: it returns the existing keyring on reruns.
  command: docker exec ceph_mon ceph auth get-or-create client.nova mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rwx pool=vms, allow rx pool=images'
  register: cephx_key
  delegate_to: "{{ groups['ceph-mon'][0] }}"
  changed_when: False
  run_once: True

# TODO(SamYaple): Improve failed_when and changed_when tests
- name: Pulling cephx keyring for libvirt
  # get-key returns only the base64 secret, which is what libvirt expects.
  command: docker exec ceph_mon ceph auth get-key client.nova
  register: cephx_raw_key
  delegate_to: "{{ groups['ceph-mon'][0] }}"
  changed_when: False
  run_once: True

- name: Pushing cephx keyring for nova
  copy:
    # NOTE(review): terminate with a plain "\n"; the original appended
    # "\n\r", leaving a stray carriage return in the keyring file.
    content: "{{ cephx_key.stdout }}\n"
    dest: "{{ node_config_directory }}/nova-compute/ceph.client.nova.keyring"
    mode: "0600"
  when: inventory_hostname in groups['compute']

- name: Pushing secrets xml for libvirt
  template:
    src: roles/nova/templates/secret.xml.j2
    dest: "{{ node_config_directory }}/nova-libvirt/secrets/{{ rbd_secret_uuid }}.xml"
    mode: "0600"
  when: inventory_hostname in groups['compute']

- name: Pushing secrets key for libvirt
  copy:
    # No trailing newline: the raw base64 key must match exactly.
    content: "{{ cephx_raw_key.stdout }}"
    dest: "{{ node_config_directory }}/nova-libvirt/secrets/{{ rbd_secret_uuid }}.base64"
    mode: "0600"
  when: inventory_hostname in groups['compute']
......@@ -129,6 +129,12 @@
dest: "{{ node_config_directory }}/nova-compute/config.json"
when: inventory_hostname in groups['compute']
- name: Copying over config(s)
template:
src: "libvirtd.conf.j2"
dest: "{{ node_config_directory }}/nova-libvirt/libvirtd.conf"
when: inventory_hostname in groups['compute']
- include: ../../config.yml
vars:
service_name: "nova-novncproxy"
......
---
# Entry point for the nova role: optionally prepare Ceph, then register,
# configure, bootstrap and start the nova services on hosts that belong to
# any nova group.

# Ceph pool/keyring/libvirt-secret setup only when Ceph is enabled.
- include: ceph.yml
  when: enable_ceph | bool

- include: register.yml
  when: inventory_hostname in groups['nova-api'] or
        inventory_hostname in groups['nova-consoleauth'] or
        inventory_hostname in groups['nova-novncproxy'] or
        inventory_hostname in groups['nova-scheduler'] or
        inventory_hostname in groups['nova-compute']

- include: config.yml
  when: inventory_hostname in groups['nova-api'] or
        inventory_hostname in groups['nova-consoleauth'] or
        inventory_hostname in groups['nova-novncproxy'] or
        inventory_hostname in groups['nova-scheduler'] or
        inventory_hostname in groups['nova-compute']

- include: bootstrap.yml
  when: inventory_hostname in groups['nova-api'] or
        inventory_hostname in groups['nova-consoleauth'] or
        inventory_hostname in groups['nova-novncproxy'] or
        inventory_hostname in groups['nova-scheduler'] or
        inventory_hostname in groups['nova-compute']

- include: start.yml
  when: inventory_hostname in groups['nova-api'] or
        inventory_hostname in groups['nova-consoleauth'] or
        inventory_hostname in groups['nova-novncproxy'] or
        inventory_hostname in groups['nova-scheduler'] or
        inventory_hostname in groups['nova-compute']
......@@ -19,6 +19,7 @@
- "{{ node_config_directory }}/nova-libvirt/:/opt/kolla/config_files/:ro"
- "/run:/run"
- "/sys/fs/cgroup:/sys/fs/cgroup"
- "/lib/modules:/lib/modules:ro"
volumes_from:
- nova_data
env:
......
# libvirtd configuration template for the nova-libvirt container.
# Enables the TCP listener so nova can reach libvirt remotely (live migration).
listen_tcp = 1
# NOTE(review): "none" disables authentication and ca_file disables TLS on
# the TCP listener — anyone who can reach listen_addr controls libvirt.
# Presumably acceptable only on a trusted management network; confirm.
auth_tcp = "none"
ca_file = ""
# log_level 2 = informational; logs go to a file inside the container.
log_level = 2
log_outputs = "2:file:/var/log/libvirt/libvirtd.log"
# Bind to this host's address on the API interface.
listen_addr = "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}"
unix_sock_group = "root"
unix_sock_ro_perms = "0777"
unix_sock_rw_perms = "0770"
# Local unix-socket access is likewise unauthenticated.
auth_unix_ro = "none"
auth_unix_rw = "none"
......@@ -6,6 +6,18 @@
"dest": "/etc/nova/nova.conf",
"owner": "nova",
"perm": "0600"
}
}{% if enable_ceph | bool %},
{
"source": "/opt/kolla/config_files/ceph.client.nova.keyring",
"dest": "/etc/ceph/ceph.client.nova.keyring",
"owner": "nova",
"perm": "0600"
},
{
"source": "/opt/kolla/config_files/ceph.conf",
"dest": "/etc/ceph/ceph.conf",
"owner": "nova",
"perm": "0600"
}{% endif %}
]
}
{
"command": "/usr/sbin/libvirtd",
"config_files": []
{
    "command": "/usr/sbin/libvirtd -l",
    "config_files": [
        {
            "source": "/opt/kolla/config_files/libvirtd.conf",
            "dest": "/etc/libvirt/libvirtd.conf",
            "owner": "root",
            "perm": "0600"
        }{% if enable_ceph | bool %},
        {
            "source": "/opt/kolla/config_files/secrets",
            "dest": "/etc/libvirt/secrets",
            "owner": "root",
            "perm": "0600"
        },
        {
            "source": "/opt/kolla/config_files/ceph.conf",
            "dest": "/etc/ceph/ceph.conf",
            "owner": "root",
            "perm": "0600"
        }{% endif %}
    ]
}
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment