
In a deployment that has both Ceph and Swift deployed, it can be useful to
separate the network traffic. This change adds support for dedicated storage
networks for both Ceph and Swift. By default, the storage hosts are attached
to the following networks:

* Overcloud admin network
* Internal network
* Storage network
* Storage management network

This adds four additional networks, which can be used to separate the storage
network traffic as follows:

* Ceph storage network (ceph_storage_net_name) is used to carry Ceph storage
  data traffic. Defaults to the storage network (storage_net_name).
* Ceph storage management network (ceph_storage_mgmt_net_name) is used to
  carry Ceph storage management traffic. Defaults to the storage management
  network (storage_mgmt_net_name).
* Swift storage network (swift_storage_net_name) is used to carry Swift
  storage data traffic. Defaults to the storage network (storage_net_name).
* Swift storage replication network (swift_storage_replication_net_name) is
  used to carry Swift storage replication traffic. Defaults to the storage
  management network (storage_mgmt_net_name).

This change also includes several improvements to Swift device management and
ring generation. Device management and ring generation are now separate, with
device management occurring during 'kayobe overcloud host configure', and
ring generation during a new command, 'kayobe overcloud swift rings generate'.

For device management, we now use standard Ansible modules rather than
commands for device preparation. File system labels can be configured for
each device individually.

For ring generation, all commands are run on a single host, by default a host
in the Swift storage group. A Python script runs in one of the kolla Swift
containers, which consumes an autogenerated YAML config file that defines the
layout of the rings.

Change-Id: Iedc7535532d706f02d710de69b422abf2f6fe54c
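
As a hedged illustration only (the network names and the exact configuration
file used below are assumptions, not part of this change), a deployment could
map dedicated networks to the new variables in its Kayobe network
configuration along these lines:

    ---
    # Example only: map the per-service storage networks to networks
    # defined elsewhere in the Kayobe configuration. The names on the
    # right-hand side are placeholders.
    ceph_storage_net_name: ceph_storage
    ceph_storage_mgmt_net_name: ceph_storage_mgmt
    swift_storage_net_name: swift_storage
    swift_storage_replication_net_name: swift_replication

If these variables are left unset, each falls back to the existing storage or
storage management network as described above. Once hosts have been
configured with 'kayobe overcloud host configure', the Swift rings can be
built with the new 'kayobe overcloud swift rings generate' command.
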
---
###############################################################################
# Storage node configuration.

# User with which to access the storage hosts via SSH during bootstrap, in
# order to set up the Kayobe user account.
storage_bootstrap_user: "{{ lookup('env', 'USER') }}"

###############################################################################
# Storage network interface configuration.

# List of networks to which storage nodes are attached.
storage_network_interfaces: >
  {{ (storage_default_network_interfaces +
      storage_extra_network_interfaces +
      ([ceph_storage_net_name]
       if storage_needs_ceph_network else []) +
      ([ceph_storage_mgmt_net_name]
       if storage_needs_ceph_mgmt_network else []) +
      ([swift_storage_net_name]
       if storage_needs_swift_network else []) +
      ([swift_storage_replication_net_name]
       if storage_needs_swift_replication_network else [])) | reject('none') | unique | list }}

# List of default networks to which storage nodes are attached.
storage_default_network_interfaces: >
  {{ [admin_oc_net_name,
      internal_net_name,
      storage_mgmt_net_name,
      storage_net_name] | unique | list }}

# List of extra networks to which storage nodes are attached.
storage_extra_network_interfaces: []

# Whether this host requires access to Ceph networks.
storage_needs_ceph_network: >-
  {{ kolla_enable_ceph | bool and
     inventory_hostname in query('inventory_hostnames', ceph_hosts) }}

# Whether this host requires access to the Ceph storage management network.
storage_needs_ceph_mgmt_network: >-
  {{ kolla_enable_ceph | bool and
     inventory_hostname in query('inventory_hostnames', ceph_hosts) }}

# Whether this host requires access to Swift networks.
storage_needs_swift_network: >-
  {{ kolla_enable_swift | bool and
     inventory_hostname in query('inventory_hostnames', swift_hosts) }}

# Whether this host requires access to the Swift storage replication network.
storage_needs_swift_replication_network: >-
  {{ kolla_enable_swift | bool and
     inventory_hostname in query('inventory_hostnames', swift_hosts) }}
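
# Note that because the Ceph and Swift network names default to
# storage_net_name and storage_mgmt_net_name, the 'unique' filter above
# collapses them back to the default networks unless dedicated networks are
# configured. As an illustrative example only (the network name
# 'infiniband_net' is a placeholder), additional site-specific networks could
# be attached via:
#
# storage_extra_network_interfaces:
#   - infiniband_net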

###############################################################################
# Storage node BIOS configuration.

# Dict of storage BIOS options. Format is the same as that used by the
# stackhpc.drac role.
storage_bios_config: "{{ storage_bios_config_default | combine(storage_bios_config_extra) }}"

# Dict of default storage BIOS options. Format is the same as that used by
# the stackhpc.drac role.
storage_bios_config_default: {}

# Dict of additional storage BIOS options. Format is the same as that used by
# the stackhpc.drac role.
storage_bios_config_extra: {}
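
# As an illustrative example only (the BIOS option name and value are
# placeholders; consult the stackhpc.drac role documentation for supported
# settings), additional BIOS options are given as a dict of option name to
# value:
#
# storage_bios_config_extra:
#   NumLock: "On"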

###############################################################################
# Storage node RAID configuration.

# List of storage RAID volumes. Format is the same as that used by the
# stackhpc.drac role.
storage_raid_config: "{{ storage_raid_config_default + storage_raid_config_extra }}"

# List of default storage RAID volumes. Format is the same as that used by
# the stackhpc.drac role.
storage_raid_config_default: []

# List of additional storage RAID volumes. Format is the same as that used by
# the stackhpc.drac role.
storage_raid_config_extra: []
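
# A rough sketch only: RAID volumes are given as a list of dicts. The keys
# shown here are assumptions; refer to the stackhpc.drac role documentation
# for the actual schema.
#
# storage_raid_config_extra:
#   - name: data-raid10
#     raid_level: 10
#     pdisks:
#       - Disk.Bay.0
#       - Disk.Bay.1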

###############################################################################
# Storage node LVM configuration.

# List of storage volume groups. See mrlesmithjr.manage-lvm role for
# format.
storage_lvm_groups: "{{ storage_lvm_groups_default + storage_lvm_groups_extra }}"

# Default list of storage volume groups. See mrlesmithjr.manage-lvm role for
# format.
storage_lvm_groups_default:
  - "{{ storage_lvm_group_data }}"

# Additional list of storage volume groups. See mrlesmithjr.manage-lvm role
# for format.
storage_lvm_groups_extra: []

# Storage LVM volume group for data. See mrlesmithjr.manage-lvm role for
# format.
storage_lvm_group_data:
  vgname: data
  disks: "{{ storage_lvm_group_data_disks }}"
  create: True
  lvnames: "{{ storage_lvm_group_data_lvs }}"

# List of disks for use by storage LVM data volume group. Default to an
# invalid value to require configuration.
storage_lvm_group_data_disks:
  - changeme
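
# For example (commented out; the device paths are site-specific
# placeholders), the 'changeme' default above must be replaced with real
# block devices before configuring storage hosts:
#
# storage_lvm_group_data_disks:
#   - /dev/sdb
#   - /dev/sdc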

# List of LVM logical volumes for the data volume group.
storage_lvm_group_data_lvs:
  - "{{ storage_lvm_group_data_lv_docker_volumes }}"

# Docker volumes LVM backing volume.
storage_lvm_group_data_lv_docker_volumes:
  lvname: docker-volumes
  size: "{{ storage_lvm_group_data_lv_docker_volumes_size }}"
  create: True
  filesystem: "{{ storage_lvm_group_data_lv_docker_volumes_fs }}"
  mount: True
  mntp: /var/lib/docker/volumes

# Size of docker volumes LVM backing volume.
storage_lvm_group_data_lv_docker_volumes_size: 75%VG

# Filesystem for docker volumes LVM backing volume. ext4 allows for shrinking.
storage_lvm_group_data_lv_docker_volumes_fs: ext4

###############################################################################
# Storage node Ceph configuration.

# List of Ceph disks.
# The format is a list of dicts like:
# - { osd: "/dev/sdb", journal: "/dev/sdc" }
# - { osd: "/dev/sdd" }
# The journal variable is not mandatory.
storage_ceph_disks: []

###############################################################################
# Storage node sysctl configuration.

# Dict of sysctl parameters to set.
storage_sysctl_parameters: {}
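
# As an illustrative example only (the parameter and value are placeholders),
# sysctl settings are given as a dict of parameter name to desired value:
#
# storage_sysctl_parameters:
#   vm.swappiness: 10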

###############################################################################
# Storage node user configuration.

# List of users to create. This should be in a format accepted by the
# singleplatform-eng.users role.
storage_users: "{{ users_default }}"
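
# As an illustrative example only: users are described in the format of the
# singleplatform-eng.users role. The values below are placeholders, and only
# a subset of the role's supported fields is shown; consult the role
# documentation for the full schema.
#
# storage_users:
#   - username: operator1
#     name: Storage operator
#     groups:
#       - wheel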