openstack-ansible/playbooks/common-playbooks/nova.yml
Commit 9fca8555df by Dmitriy Rabotyagov: Move repo_packages to group_vars
At the moment it's not possible to apply different versions of
services to different groups, because playbook vars take precedence
over group_vars. However, it can be quite important to support such
use cases, for example for phased rollouts of newer versions.

This will also reduce the number of unnecessary variables that are
included for each host, since only the required git details will be loaded.

Closes-Bug: #2007296
Depends-On: https://review.opendev.org/c/openstack/openstack-ansible-os_rally/+/881954
Change-Id: Icaa3a958926d9f9aa6cb649bd9f3da9449dd7490
2023-05-18 08:49:51 +00:00
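
As a rough sketch of the behaviour this change enables (the override file path
and the pinned branch below are hypothetical examples, not taken from this
commit), a deployer can now pin a different source version for a single group
by setting the git details in group_vars:

    # /etc/openstack_deploy/group_vars/nova_compute.yml (hypothetical override)
    nova_git_install_branch: stable/2023.1

Because the value is no longer defined as a playbook var, the group-level
setting is not overridden, which is what makes phased rollouts of newer
versions possible.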


---
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

- name: Gather nova facts
  hosts: "{{ nova_hosts }}"
  gather_facts: false
  tags:
    - always
  tasks:
    - name: Gather minimal facts for nova
      setup:
        gather_subset:
          - "!all"
          - min
      when: osa_gather_facts | default(True)

    - name: Gather additional facts for nova
      setup:
        gather_subset: "{{ nova_gather_subset | default('processor_count') }}"
        filter: "{{ nova_gather_filter | default('ansible_processor*') }}"
      when: osa_gather_facts | default(True)

- name: Install nova services
  hosts: "{{ nova_hosts }}"
  serial: "{{ nova_serial }}"
  gather_facts: false
  user: root
  environment: "{{ deployment_environment_variables | default({}) }}"
  vars_files:
    - "../defaults/{{ install_method }}_install.yml"
  tags:
    - nova
  pre_tasks:
    # Enable execution of ceph_client on the nova compute hosts if cinder RBD
    # backends are used. This is necessary to ensure that volume-backed Nova
    # instances can function when RBD is the volume backend.
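    # The fact is computed only once, on the first nova_compute host in the
    # play, and is stored on localhost via delegate_facts so that every host
    # and the os_nova role invocation below can reuse the same value.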
    - name: Set cinder RBD inuse fact
      set_fact:
        nova_cinder_rbd_inuse: "{{ True in groups['cinder_volume'] | map('extract', hostvars, 'cinder_backend_rbd_inuse') }}"
      delegate_to: localhost
      delegate_facts: True
      when:
        - "'nova_compute' in group_names"
        - "inventory_hostname == ((groups['nova_compute'] | intersect(ansible_play_hosts)) | list)[0]"
        - "hostvars['localhost']['nova_cinder_rbd_inuse'] is not defined"
      tags:
        - always

    # In order to ensure that any container, software or
    # config file changes which cause a container/service
    # restart do not cause an unexpected outage, we drain
    # the load balancer back end for this container.
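    # The matching haproxy-endpoint-manage call in post_tasks re-enables the
    # back end once the os_nova role has completed.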
    - include_tasks: ../common-tasks/haproxy-endpoint-manage.yml
      vars:
        haproxy_backend: "{{ backend_name }}-back"
        haproxy_state: disabled
      loop_control:
        loop_var: backend_name
      when:
        - "backend_name in group_names"
        - "groups[backend_name] | length > 1"
      with_items:
        - "nova_api_metadata"
        - "nova_api_os_compute"
        - "nova_console"

    - name: Determine management bridge IP address
      include_tasks: ../common-tasks/dynamic-address-fact.yml
      vars:
        network_address: "container_address"
      tags:
        - always

    - name: Configure container
      include_tasks: "../common-tasks/os-{{ container_tech | default('lxc') }}-container-setup.yml"
      when: not is_metal
      vars:
        extra_container_config_no_restart:
          - "lxc.start.order=39"

    - include_tasks: ../common-tasks/unbound-clients.yml
      when:
        - hostvars['localhost']['resolvconf_enabled'] | bool

    - name: Add nbd devices to the compute
      shell: |
        for i in /dev/nbd*;do
          lxc-device -n {{ container_name }} add $i $i
        done
      failed_when: false
      register: device_add
      changed_when: >
        'added' in device_add.stdout.lower()
      delegate_to: "{{ physical_host }}"
      when:
        - container_tech | default('lxc') == 'lxc'
        - "'nova_compute' in group_names"
        - "not is_metal | bool"
      tags:
        - always

    - name: Add net/tun device to the compute
      command: |
        lxc-device -n {{ container_name }} add /dev/net/tun /dev/net/tun
      delegate_to: "{{ physical_host }}"
      when:
        - container_tech | default('lxc') == 'lxc'
        - "'nova_compute' in group_names"
        - "not is_metal | bool"
      tags:
        - always

    - name: Check if kvm device exists
      stat:
        path: /dev/kvm
      delegate_to: "{{ physical_host }}"
      register: kvm_device
      when:
        - container_tech | default('lxc') == 'lxc'
        - "'nova_compute' in group_names"
        - "not is_metal | bool"
      tags:
        - always

    - name: Add kvm device to the compute
      command: |
        lxc-device -n {{ container_name }} add /dev/kvm /dev/kvm
      delegate_to: "{{ physical_host }}"
      register: device_add
      failed_when: false
      changed_when: >
        'added' in device_add.stdout.lower()
      when:
        - container_tech | default('lxc') == 'lxc'
        - "'nova_compute' in group_names"
        - "not is_metal | bool"
        - "'ischr' in kvm_device.stat and kvm_device.stat.ischr | bool"
      tags:
        - always

  roles:
    - role: "os_nova"
      nova_management_address: "{{ container_address }}"
      nova_cinder_rbd_inuse: "{{ hostvars['localhost']['nova_cinder_rbd_inuse'] | default(False) }}"

    - role: "openstack.osa.system_crontab_coordination"
      tags:
        - crontab

  post_tasks:
    # Now that container changes are done, we can set
    # the load balancer back end for this container
    # to available again.
    - include_tasks: ../common-tasks/haproxy-endpoint-manage.yml
      vars:
        haproxy_backend: "{{ backend_name }}-back"
        haproxy_state: enabled
      loop_control:
        loop_var: backend_name
      when:
        - "backend_name in group_names"
        - "groups[backend_name] | length > 1"
      with_items:
        - "nova_api_metadata"
        - "nova_api_os_compute"
        - "nova_console"