Configure ansible-lint and fix issues found

Copy ansible-lint configuration from kolla-ansible as a starting point.
Also replace alint tox job by ansible-lint.

Fix various issues found by ansible-lint to make it pass with the
current set of rules.

Change-Id: I1d6173caadbcf249330512e170af8095464f1237
This commit is contained in:
Pierre Riteau
2025-05-27 22:08:39 +02:00
parent 9b805ad190
commit 816d6ffde2
28 changed files with 191 additions and 125 deletions

44
.ansible-lint Normal file
View File

@@ -0,0 +1,44 @@
---
# NOTE(priteau): Rule file imported from kolla-ansible
strict: true
use_default_rules: true
skip_list:
# [E301] Commands should not change things if nothing needs doing
# TODO(mnasiadka): Fix tasks that fail this check in a later iteration
- no-changed-when
# [E503] Tasks that run when changed should likely be handlers
- no-handler
# [unnamed-task] All tasks should be named
# FIXME(mgoddard): Add names to all tasks
- unnamed-task
# disable experimental rules
- experimental
# Package installs should not use latest
- package-latest
# Most files should not contain tabs
- no-tabs
# NOTE(frickler): Agreed at Zed PTG not to use FQCN for builtin actions for now, due to
# conflicts with open patches and backports.
- fqcn-builtins
# Allow Jinja templating inside task and play names
- name[template]
# FQCNs again, now for module actions
- fqcn[action]
# role name check matching ^*$
- role-name
# Allow long lines
- yaml[line-length]
# TODO(frickler): Discuss these in detail, skipping for now to unblock things
- command-instead-of-module
- command-instead-of-shell
- deprecated-local-action
- ignore-errors
- jinja[spacing]
- key-order[task]
- name[play]
- no-free-form
- risky-file-permissions
- risky-shell-pipe
- run-once[task]
- var-naming[no-reserved]
- var-naming[no-role-prefix]

1
.gitignore vendored
View File

@@ -58,6 +58,7 @@ ansible/*.retry
ansible/roles/*/tests/*.retry
# Ansible Galaxy roles & collections
.ansible
ansible/roles/*\.*/
ansible/collections/

View File

@@ -11,7 +11,7 @@
tags:
- apt
tasks:
- name: include apt role
- name: Include apt role
include_role:
name: apt
when: ansible_facts.os_family == 'Debian'

View File

@@ -39,42 +39,42 @@
fail_msg: One or more Ironic variables are undefined.
- block:
- name: Show baremetal node
ansible.builtin.command:
cmd: "{{ venv }}/bin/openstack baremetal node show {{ inventory_hostname }}"
register: node_show
failed_when:
- '"HTTP 404" not in node_show.stderr'
- node_show.rc != 0
changed_when: false
- name: Show baremetal node
ansible.builtin.command:
cmd: "{{ venv }}/bin/openstack baremetal node show {{ inventory_hostname }}"
register: node_show
failed_when:
- '"HTTP 404" not in node_show.stderr'
- node_show.rc != 0
changed_when: false
# NOTE: The openstack.cloud.baremetal_node module cannot be used in this
# script due to requiring a MAC address pre-defined, instead, this should
# be discovered by inspection following this script.
#
# NOTE: IPMI address must be passed with Redfish address to ensure existing
# Ironic nodes match with new nodes during inspection.
- name: Create baremetal nodes
ansible.builtin.shell:
cmd: |
{{ venv }}/bin/openstack baremetal node create \
--name {{ inventory_hostname }} \
--driver {{ ironic_driver }} \
{% for key, value in ironic_driver_info.items() %}
--driver-info {{ key }}={{ value }} \
{% endfor %}
{% for key, value in ironic_properties.items() %}
--property {{ key }}={{ value }} \
{% endfor %}
--resource-class {{ ironic_resource_class }}
when:
- node_show.rc != 0
# NOTE: The openstack.cloud.baremetal_node module cannot be used in this
# script due to requiring a MAC address pre-defined, instead, this should
# be discovered by inspection following this script.
#
# NOTE: IPMI address must be passed with Redfish address to ensure existing
# Ironic nodes match with new nodes during inspection.
- name: Create baremetal nodes
ansible.builtin.shell:
cmd: |
{{ venv }}/bin/openstack baremetal node create \
--name {{ inventory_hostname }} \
--driver {{ ironic_driver }} \
{% for key, value in ironic_driver_info.items() %}
--driver-info {{ key }}={{ value }} \
{% endfor %}
{% for key, value in ironic_properties.items() %}
--property {{ key }}={{ value }} \
{% endfor %}
--resource-class {{ ironic_resource_class }}
when:
- node_show.rc != 0
- name: Manage baremetal nodes
ansible.builtin.command:
cmd: "{{ venv }}/bin/openstack baremetal node manage {{ inventory_hostname }} --wait"
when:
- node_show.rc != 0
- name: Manage baremetal nodes
ansible.builtin.command:
cmd: "{{ venv }}/bin/openstack baremetal node manage {{ inventory_hostname }} --wait"
when:
- node_show.rc != 0
delegate_to: "{{ controller_host }}"
vars:
# NOTE: Without this, the controller's ansible_host variable will not

View File

@@ -19,35 +19,35 @@
extra_args: "{% if pip_upper_constraints_file %}-c {{ pip_upper_constraints_file }}{% endif %}"
- block:
- name: Fail if allocation pool start not defined
fail:
msg: >
The variable, ironic_serial_console_tcp_pool_start is not defined.
This variable is required to run this playbook.
when: not ironic_serial_console_tcp_pool_start
- name: Fail if allocation pool start not defined
fail:
msg: >
The variable, ironic_serial_console_tcp_pool_start is not defined.
This variable is required to run this playbook.
when: not ironic_serial_console_tcp_pool_start
- name: Fail if allocation pool end not defined
fail:
msg: >
The variable, ironic_serial_console_tcp_pool_end is not defined.
This variable is required to run this playbook.
when:
- not ironic_serial_console_tcp_pool_end
- name: Fail if allocation pool end not defined
fail:
msg: >
The variable, ironic_serial_console_tcp_pool_end is not defined.
This variable is required to run this playbook.
when:
- not ironic_serial_console_tcp_pool_end
- name: Get list of nodes that we should configure serial consoles on
set_fact:
baremetal_nodes: >-
{{ query('inventory_hostnames', console_compute_node_limit |
default('baremetal-compute') ) | unique }}
- name: Get list of nodes that we should configure serial consoles on
set_fact:
baremetal_nodes: >-
{{ query('inventory_hostnames', console_compute_node_limit |
default('baremetal-compute') ) | unique }}
- name: Reserve TCP ports for ironic serial consoles
include_role:
name: console-allocation
vars:
console_allocation_pool_start: "{{ ironic_serial_console_tcp_pool_start }}"
console_allocation_pool_end: "{{ ironic_serial_console_tcp_pool_end }}"
console_allocation_ironic_nodes: "{{ baremetal_nodes }}"
console_allocation_filename: "{{ kayobe_env_config_path }}/console-allocation.yml"
- name: Reserve TCP ports for ironic serial consoles
include_role:
name: console-allocation
vars:
console_allocation_pool_start: "{{ ironic_serial_console_tcp_pool_start }}"
console_allocation_pool_end: "{{ ironic_serial_console_tcp_pool_end }}"
console_allocation_ironic_nodes: "{{ baremetal_nodes }}"
console_allocation_filename: "{{ kayobe_env_config_path }}/console-allocation.yml"
when: cmd == "enable"
- name: Enable serial console
@@ -76,37 +76,37 @@
ansible_host: "{{ hostvars[controller_host].ansible_host | default(controller_host) }}"
- block:
- name: Fail if console interface is not ipmitool-socat
fail:
msg: >-
In order to use the serial console you must set the console_interface to ipmitool-socat.
when: node["Console Interface"] != "ipmitool-socat"
- name: Fail if console interface is not ipmitool-socat
fail:
msg: >-
In order to use the serial console you must set the console_interface to ipmitool-socat.
when: node["Console Interface"] != "ipmitool-socat"
- name: Set IPMI serial console terminal port
vars:
name: "{{ node['Name'] }}"
port: "{{ hostvars[controller_host].console_allocation_result.ports[name] }}"
# NOTE: Without this, the controller's ansible_host variable will not
# be respected when using delegate_to.
ansible_host: "{{ hostvars[controller_host].ansible_host | default(controller_host) }}"
command: >
{{ venv }}/bin/openstack baremetal node set {{ name }} --driver-info ipmi_terminal_port={{ port }}
delegate_to: "{{ controller_host }}"
environment: "{{ openstack_auth_env }}"
when: >-
node['Driver Info'].ipmi_terminal_port is not defined or
node['Driver Info'].ipmi_terminal_port | int != port | int
- name: Set IPMI serial console terminal port
vars:
name: "{{ node['Name'] }}"
port: "{{ hostvars[controller_host].console_allocation_result.ports[name] }}"
# NOTE: Without this, the controller's ansible_host variable will not
# be respected when using delegate_to.
ansible_host: "{{ hostvars[controller_host].ansible_host | default(controller_host) }}"
command: >
{{ venv }}/bin/openstack baremetal node set {{ name }} --driver-info ipmi_terminal_port={{ port }}
delegate_to: "{{ controller_host }}"
environment: "{{ openstack_auth_env }}"
when: >-
node['Driver Info'].ipmi_terminal_port is not defined or
node['Driver Info'].ipmi_terminal_port | int != port | int
- name: Enable the IPMI socat serial console
vars:
# NOTE: Without this, the controller's ansible_host variable will not
# be respected when using delegate_to.
ansible_host: "{{ hostvars[controller_host].ansible_host | default(controller_host) }}"
command: >
{{ venv }}/bin/openstack baremetal node console enable {{ node['Name'] }}
delegate_to: "{{ controller_host }}"
environment: "{{ openstack_auth_env }}"
when: not node['Console Enabled']
- name: Enable the IPMI socat serial console
vars:
# NOTE: Without this, the controller's ansible_host variable will not
# be respected when using delegate_to.
ansible_host: "{{ hostvars[controller_host].ansible_host | default(controller_host) }}"
command: >
{{ venv }}/bin/openstack baremetal node console enable {{ node['Name'] }}
delegate_to: "{{ controller_host }}"
environment: "{{ openstack_auth_env }}"
when: not node['Console Enabled']
vars:
matching_nodes: >-
{{ (nodes.stdout | from_json) | selectattr('Name', 'defined') |

View File

@@ -15,7 +15,7 @@
tasks:
- name: Set a fact about the kayobe target virtualenv
set_fact:
virtualenv: "{{ ansible_python_interpreter | dirname | dirname }}"
kayobe_virtualenv: "{{ ansible_python_interpreter | dirname | dirname }}"
when:
- ansible_python_interpreter is defined
- not ansible_python_interpreter.startswith('/bin')
@@ -47,12 +47,12 @@
mode: 0755
# Check whether the virtualenv directory is a subdirectory of the
# global virtualenv directory.
when: virtualenv.startswith(virtualenv_path)
when: kayobe_virtualenv.startswith(virtualenv_path)
become: True
- name: Ensure kayobe virtualenv directory exists
file:
path: "{{ virtualenv }}"
path: "{{ kayobe_virtualenv }}"
state: directory
owner: "{{ ansible_facts.user_uid }}"
group: "{{ ansible_facts.user_gid }}"
@@ -69,7 +69,7 @@
pip:
name: pip
state: latest
virtualenv: "{{ virtualenv }}"
virtualenv: "{{ kayobe_virtualenv }}"
# Site packages are required for using the dnf module, which is not
# available via PyPI.
virtualenv_site_packages: True
@@ -79,14 +79,14 @@
pip:
name: selinux
state: latest
virtualenv: "{{ virtualenv }}"
virtualenv: "{{ kayobe_virtualenv }}"
when:
- ansible_facts.os_family == 'RedHat'
vars:
# Use the system python interpreter since the virtualenv might not
# exist.
ansible_python_interpreter: "{{ ansible_facts.python.executable }}"
when: virtualenv is defined
when: kayobe_virtualenv is defined
# If we gathered facts earlier it would have been with a different Python
# interpreter. For gathering modes that may use a fact cache, gather facts
@@ -96,7 +96,7 @@
filter: "{{ kayobe_ansible_setup_filter }}"
gather_subset: "{{ kayobe_ansible_setup_gather_subset }}"
when:
- virtualenv is defined
- kayobe_virtualenv is defined
- gather_facts is not skipped
- lookup('config', 'DEFAULT_GATHERING') != 'implicit'
@@ -110,15 +110,15 @@
name: "{{ packages | select | list }}"
state: present
become: True
when: virtualenv is not defined
when: kayobe_virtualenv is not defined
- name: Ensure kolla-ansible virtualenv has docker SDK for python installed
pip:
name: docker
state: latest
virtualenv: "{{ virtualenv | default(omit) }}"
virtualenv: "{{ kayobe_virtualenv | default(omit) }}"
extra_args: "{% if docker_upper_constraints_file %}-c {{ docker_upper_constraints_file }}{% endif %}"
become: "{{ virtualenv is not defined }}"
become: "{{ kayobe_virtualenv is not defined }}"
vars:
docker_upper_constraints_file: "{{ pip_upper_constraints_file }}"
when:
@@ -127,9 +127,9 @@
- name: Ensure kayobe virtualenv has podman SDK installed
import_role:
name: openstack.kolla.podman_sdk
name: openstack.kolla.podman_sdk
vars:
virtualenv: "{{ virtualenv }}"
virtualenv: "{{ kayobe_virtualenv }}"
podman_sdk_upper_constraints_file: "{{ pip_upper_constraints_file }}"
when:
- "'container-engine' in group_names"

View File

@@ -57,7 +57,7 @@
virtualenv: "{{ kolla_ansible_target_venv }}"
extra_args: "{% if kolla_upper_constraints_file %}-c {{ kolla_upper_constraints_file }}{% endif %}"
become: True
when: "{{ container_engine == 'docker' }}"
when: container_engine == 'docker'
- name: Ensure kolla-ansible virtualenv has podman SDK installed
pip:
@@ -66,7 +66,7 @@
virtualenv: "{{ kolla_ansible_target_venv }}"
extra_args: "{% if kolla_upper_constraints_file %}-c {{ kolla_upper_constraints_file }}{% endif %}"
become: True
when: "{{ container_engine == 'podman' }}"
when: container_engine == 'podman'
- name: Ensure kolla-ansible virtualenv has SELinux bindings installed
pip:

View File

@@ -9,7 +9,7 @@
tags:
- mdadm
roles:
- name: mrlesmithjr.mdadm
- role: mrlesmithjr.mdadm
become: True
when:
- mdadm_arrays is defined

View File

@@ -1,3 +1,4 @@
---
- name: Configure HTTP(S) proxy settings
hosts: seed-hypervisor:seed:overcloud:infra-vms
max_fail_percentage: >-

View File

@@ -1,5 +1,5 @@
---
- name: reload systemd daemon
- name: Reload systemd daemon
systemd:
name: cloud-init
daemon_reload: yes

View File

@@ -5,5 +5,5 @@
state: touch
mode: "u=rw,g=r,o=r"
notify:
- reload systemd daemon
- Reload systemd daemon
become: True

View File

@@ -4,7 +4,7 @@
path: /etc/dnf/dnf.conf
section: "main"
option: "{{ item.key }}"
value: "{{ item.value }}"
value: "{{ item.value }}"
loop: "{{ query('dict', dnf_config) }}"
become: true

View File

@@ -14,4 +14,4 @@
- volumes
when: "'/' not in volume"
vars:
volume: "{{ item.1.split(':')[0] }}"
volume: "{{ item.1.split(':')[0] }}"

View File

@@ -1,7 +1,7 @@
---
- import_tasks: prerequisites.yml
- name: list all VMs on hypervisor
- name: List all VMs on hypervisor
virt:
command: list_vms
register: all_vms

View File

@@ -159,4 +159,4 @@
# newer versions.
ANSIBLE_COLLECTIONS_SCAN_SYS_PATH: "False"
# NOTE(wszumski): Don't use path configured for kayobe
ANSIBLE_COLLECTIONS_PATH:
ANSIBLE_COLLECTIONS_PATH: ''

View File

@@ -176,7 +176,6 @@ neutron_tenant_network_types: {{ kolla_neutron_ml2_tenant_network_types | join('
# ulimits:
#############
# TLS options
#############

View File

@@ -137,7 +137,7 @@
- name: Check that no inventory overrides are configured
assert:
that:
- kolla_ansible_overcloud_inventory_overrides.matched == 0
- kolla_ansible_overcloud_inventory_overrides.matched == 0
msg: >
Overcloud group vars were found when they should not be set.

View File

@@ -138,7 +138,7 @@ kolla_openstack_custom_config_include_globs_default:
glob: nova/**
- enabled: '{{ kolla_enable_nova | bool }}'
glob: nova_compute/**
- enabled: '{{ kolla_enable_octavia | bool }}'
- enabled: '{{ kolla_enable_octavia | bool }}'
glob: octavia.conf
- enabled: '{{ kolla_enable_octavia | bool }}'
glob: octavia/**

View File

@@ -14,4 +14,4 @@
vars:
container_name: "{{ item.key }}"
container_config: "{{ item.value }}"
with_dict: "{{ seed_containers }}"
with_dict: "{{ seed_containers }}"

View File

@@ -1,6 +1,7 @@
---
- name: Destroy containers (loop)
include_tasks: destroy-container.yml
vars:
container_name: "{{ item.key }}"
container_config: "{{ item.value }}"
with_dict: "{{ seed_containers }}"
with_dict: "{{ seed_containers }}"

View File

@@ -32,7 +32,7 @@
- block:
- name: Test the swift-block-devices role
include_role:
name: ../../swift-block-devices
name: ../../swift-block-devices
vars:
swift_block_devices:
- device: "{{ loopback.stdout }}"

View File

@@ -7,7 +7,7 @@
- block:
- name: Test the swift-block-devices role
include_role:
name: ../../swift-block-devices
name: ../../swift-block-devices
vars:
swift_block_devices:
- /dev/fake

View File

@@ -24,7 +24,7 @@
- block:
- name: Test the swift-block-devices role
include_role:
name: ../../swift-block-devices
name: ../../swift-block-devices
vars:
swift_block_devices:
- device: "{{ loopback.stdout }}"

View File

@@ -11,4 +11,3 @@
- ssh-known-host
roles:
- role: ssh-known-host

View File

@@ -9,7 +9,7 @@
tags:
- tuned
roles:
- name: giovtorres.tuned
- role: giovtorres.tuned
become: true
when:
- tuned_active_builtin_profile != ""

View File

@@ -1,2 +1,3 @@
---
aio_ips:
controller1: 192.168.33.3

View File

@@ -75,10 +75,10 @@ Environments
The following tox environments are provided:
alint
Run Ansible linter.
ansible
Run Ansible tests for some ansible roles using Ansible playbooks.
ansible-lint
Run Ansible linter.
ansible-syntax
Run a syntax check for all Ansible files.
docs

24
tox.ini
View File

@@ -69,8 +69,28 @@ commands =
-p {toxinidir}/ansible/roles
bash -c "source {envdir}/bin/activate && {toxinidir}/tools/test-molecule.sh {posargs}"
[testenv:alint]
commands = bash -c "ansible-lint {toxinidir}/ansible/*.yml"
[testenv:linters]
# Env vars and deps need to be defined in top level tox env
setenv =
ANSIBLE_ACTION_PLUGINS = {toxinidir}/ansible/action_plugins
ANSIBLE_FILTER_PLUGINS = {toxinidir}/ansible/filter_plugins
ANSIBLE_ROLES_PATH = {toxinidir}/ansible/roles
deps =
-c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
-r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
commands =
{[testenv:ansible-lint]commands}
[testenv:ansible-lint]
# Lint only code in ansible/* - ignore various folders used by CI
# TODO(priteau): Ignore YAML linting issues in plugins and figure out why
# idrac-bootstrap.yml fails.
setenv = {[testenv:linters]setenv}
deps = {[testenv:linters]deps}
commands =
ansible-lint -p --exclude etc --exclude kayobe/plugins --exclude playbooks --exclude releasenotes --exclude roles --exclude zuul.d --exclude ansible/idrac-bootstrap.yml
[testenv:ansible-syntax]
commands =