Confirm operational status of mgmt port
Put the charm into a blocked state if the unit-local port to the
management network does not come up.

Add a workaround in the functional test that allows re-enabling the
bionic-ussuri gate.

Drop proposed from the bionic-ussuri bundles.

Closes-Bug: #1893446
Change-Id: I098e449723bce128e8c2efda1e9fafc9156a71e5
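The wait logic introduced below leans on tenacity's Retrying iterator: a
failed assertion inside the ``with attempt:`` block marks that attempt as
failed and schedules the next retry. A minimal standalone sketch of the
pattern (illustrative only, not charm code; ``probe`` is a hypothetical
stand-in for the Neutron port check):

import tenacity


def wait_for(probe):
    """Return True once probe() is True, False when retries run out."""
    try:
        for attempt in tenacity.Retrying(
                stop=tenacity.stop_after_attempt(3),
                wait=tenacity.wait_exponential(multiplier=1, min=2, max=10)):
            with attempt:
                result = probe()
                # raising AssertionError here marks the attempt as failed
                assert result is True
        return result
    except tenacity.RetryError:
        return False


print(wait_for(lambda: True))   # -> True on the first attempt
print(wait_for(lambda: False))  # -> False after three attempts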
@@ -25,6 +25,7 @@ import base64
 import neutronclient
 import socket
 import subprocess
+import tenacity
 
 from keystoneauth1 import identity as keystone_identity
 from keystoneauth1 import session as keystone_session
@@ -407,6 +408,53 @@ def toggle_hm_port(identity_service, local_unit_name, enabled=True):
     nc.update_port(port['id'], {'port': {'admin_state_up': enabled}})
 
 
+def is_hm_port_bound(identity_service, local_unit_name):
+    """Retrieve bound status of Neutron port for local unit.
+
+    :param identity_service: reactive Endpoint of type ``identity-service``
+    :type identity_service: RelationBase class
+    :param local_unit_name: Name of juju unit, used to build tag name for port
+    :type local_unit_name: str
+    :returns: True if up, False if down, None if no port found
+    :rtype: Optional[bool]
+    :raises: api_crud.APIUnavailable
+    """
+    session = session_from_identity_service(identity_service)
+    try:
+        nc = init_neutron_client(session)
+        port = lookup_hm_port(nc, local_unit_name)
+    except NEUTRON_TEMP_EXCS as e:
+        raise APIUnavailable('neutron', 'ports', e)
+    if port:
+        # The operational status field for our port is unfortunately not
+        # accurate. But we can use the binding_vif_type field to detect if
+        # binding failed.
+        return port['binding:vif_type'] != 'binding_failed'
+
+
+def wait_for_hm_port_bound(identity_service, local_unit_name):
+    """Wait for port binding status of Neutron port for local unit.
+
+    :param identity_service: reactive Endpoint of type ``identity-service``
+    :type identity_service: RelationBase class
+    :param local_unit_name: Name of juju unit, used to build tag name for port
+    :type local_unit_name: str
+    :returns: True if state was reached, False otherwise
+    """
+    try:
+        for attempt in tenacity.Retrying(
+                stop=tenacity.stop_after_attempt(3),
+                wait=tenacity.wait_exponential(
+                    multiplier=1, min=2, max=10)):
+            with attempt:
+                port_up = is_hm_port_bound(
+                    identity_service, local_unit_name)
+                assert port_up is True
+        return port_up
+    except tenacity.RetryError:
+        return False
+
+
 def setup_hm_port(identity_service, octavia_charm, host_id=None):
     """Create a per unit Neutron and OVS port for Octavia Health Manager.
 
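For context, binding:vif_type is a standard Neutron port attribute: a port
that bound successfully to Open vSwitch typically reports 'ovs', while a
failed binding reports 'binding_failed'. A sketch of inspecting the field
directly with python-neutronclient (the authenticated keystoneauth1 session
and the tag value shown are assumptions for illustration):

from neutronclient.v2_0 import client as neutron_client


def show_hm_port_bindings(session):
    # session: an authenticated keystoneauth1 session (assumed to exist)
    nc = neutron_client.Client(session=session)
    # tag format is illustrative; the charm derives it from the unit name
    for port in nc.list_ports(tags='charm-octavia-octavia-0')['ports']:
        # prints e.g. 'ovs' for a bound port, 'binding_failed' otherwise
        print(port['id'], port['binding:vif_type'])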
@@ -439,6 +487,12 @@ def setup_hm_port(identity_service, octavia_charm, host_id=None):
     HM_PORT_MAC = hm_port['mac_address']
     HM_PORT_ID = hm_port['id']
     try:
+        # TODO: We should add/update the OVS port and interface parameters on
+        # every call. Otherwise mac-address binding may get out of sync with
+        # what is in Neutron on port re-create etc. Remove the pre-flight ip
+        # link check and call ovs-vsctl with ``--may-exist`` instead. We would
+        # need to add some code to check if something changed or not.
+
         subprocess.check_output(
             ['ip', 'link', 'show', octavia.OCTAVIA_MGMT_INTF],
             stderr=subprocess.STDOUT, universal_newlines=True)
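The TODO above proposes replacing the ip link pre-flight check with an
idempotent ovs-vsctl call; ``--may-exist`` makes add-port a no-op when the
port is already present. A sketch of what that could look like (the bridge
and interface names match this charm's setup, but the exact set of
Interface options is an assumption, not the charm's current invocation):

import subprocess

# HM_PORT_MAC and HM_PORT_ID are set a few lines above in setup_hm_port()
subprocess.check_call(
    ['ovs-vsctl', '--may-exist', 'add-port', 'br-int', 'o-hm0',
     '--', 'set', 'Interface', 'o-hm0', 'type=internal',
     'external-ids:iface-status=active',
     'external-ids:attached-mac={}'.format(HM_PORT_MAC),
     'external-ids:iface-id={}'.format(HM_PORT_ID)])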
@@ -469,17 +523,21 @@ def setup_hm_port(identity_service, octavia_charm, host_id=None):
         else:
             # unknown error, raise
             raise e
-    if not hm_port['admin_state_up'] or hm_port['status'] == 'DOWN':
+    if (not hm_port['admin_state_up'] or
+            not is_hm_port_bound(identity_service,
+                                 octavia_charm.local_unit_name) or
+            hm_port['status'] == 'DOWN'):
         # NOTE(fnordahl) there appears to be a handful of race conditions
         # hitting us sometimes making the newly created ports unusable.
         # as a workaround we toggle the port belonging to us.
         # a disable/enable round trip makes Neutron reset the port
         # configuration which resolves these situations.
         ch_core.hookenv.log('toggling port {} (admin_state_up: {} '
-                            'status: {})'
+                            'status: {} binding:vif_type: {})'
                             .format(hm_port['id'],
                                     hm_port['admin_state_up'],
-                                    hm_port['status']),
+                                    hm_port['status'],
+                                    hm_port['binding:vif_type']),
                             level=ch_core.hookenv.INFO)
 
         toggle_hm_port(identity_service,
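The toggle_hm_port() helper used here performs the disable/enable round
trip: as the first hunk shows, its core is a single update_port call
flipping admin_state_up. A standalone sketch of the round trip (client and
session setup assumed, not charm code):

from neutronclient.v2_0 import client as neutron_client


def toggle_port(session, port_id):
    """Disable then re-enable a port so Neutron rebuilds its configuration."""
    nc = neutron_client.Client(session=session)  # session assumed authenticated
    nc.update_port(port_id, {'port': {'admin_state_up': False}})
    nc.update_port(port_id, {'port': {'admin_state_up': True}})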
@@ -402,6 +402,11 @@ class BaseOctaviaCharm(ch_plugins.PolicydOverridePlugin,
                 ('crud.available',  # imaginate ``crud`` relation
                  'blocked',
                  'Awaiting {} to create required resources'.format(who))]
+        else:
+            states_to_check['octavia'] = [
+                ('octavia.hm-port.available',
+                 'blocked',
+                 'Virtual network for access to Amphorae is down')]
         # if these configuration options are at default value it means they are
         # not set by end-user, they are required for successfull creation of
         # load balancer instances.
@@ -164,6 +164,12 @@ def setup_hm_port():
             .format(e),
             level=ch_core.hookenv.DEBUG)
         return
+    # Confirm port comes up from Neutron POV
+    if api_crud.wait_for_hm_port_bound(
+            identity_service, octavia_charm.local_unit_name):
+        reactive.set_flag('octavia.hm-port.available')
+    else:
+        reactive.clear_flag('octavia.hm-port.available')
 
 
 @reactive.when_not('is-update-status-hook')
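The octavia.hm-port.available flag set here is the same one the new
states_to_check entry keys on, so clearing it is what drives the unit into
the blocked state. In charms.reactive such flags also gate follow-up
handlers; a minimal sketch (the handler name is hypothetical):

import charms.reactive as reactive


@reactive.when('octavia.hm-port.available')
def hm_port_ready():
    # hypothetical handler: runs only while the health-manager port is
    # confirmed bound from Neutron's point of view
    pass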
@@ -88,12 +88,12 @@ applications:
     charm: cs:~openstack-charmers-next/glance
     num_units: 1
     options:
-      openstack-origin: cloud:bionic-ussuri/proposed
+      openstack-origin: cloud:bionic-ussuri
   keystone:
     charm: cs:~openstack-charmers-next/keystone
     num_units: 1
     options:
-      openstack-origin: cloud:bionic-ussuri/proposed
+      openstack-origin: cloud:bionic-ussuri
   mysql:
     constraints: mem=3072M
     charm: cs:~openstack-charmers-next/percona-cluster
@@ -103,7 +103,7 @@ applications:
     charm: cs:~openstack-charmers-next/neutron-api
     num_units: 1
     options:
-      openstack-origin: cloud:bionic-ussuri/proposed
+      openstack-origin: cloud:bionic-ussuri
       debug: True
       flat-network-providers: physnet1
       enable-ml2-port-security: True
@@ -113,7 +113,7 @@ applications:
     charm: cs:~openstack-charmers-next/nova-cloud-controller
     num_units: 1
     options:
-      openstack-origin: cloud:bionic-ussuri/proposed
+      openstack-origin: cloud:bionic-ussuri
       debug: True
       network-manager: Neutron
   nova-compute:
@@ -121,7 +121,7 @@ applications:
     charm: cs:~openstack-charmers-next/nova-compute
     num_units: 2
     options:
-      openstack-origin: cloud:bionic-ussuri/proposed
+      openstack-origin: cloud:bionic-ussuri
       debug: True
   hacluster-octavia:
     series: bionic
@@ -132,7 +132,7 @@ applications:
     charm: ../../../octavia
     num_units: 3
     options:
-      openstack-origin: cloud:bionic-ussuri/proposed
+      openstack-origin: cloud:bionic-ussuri
       debug: True
       loadbalancer-topology: 'ACTIVE_STANDBY'
   rabbitmq-server:
@@ -155,7 +155,7 @@ applications:
     num_units: 1
     constraints: mem=1G
     options:
-      openstack-origin: cloud:bionic-ussuri/proposed
+      openstack-origin: cloud:bionic-ussuri
       debug: true
   neutron-api-plugin-ovn:
     charm: cs:~openstack-charmers-next/neutron-api-plugin-ovn
@@ -164,7 +164,7 @@ applications:
     charm: cs:~openstack-charmers-next/ovn-central
     num_units: 3
     options:
-      source: cloud:bionic-ussuri/proposed
+      source: cloud:bionic-ussuri
   ovn-chassis:
     charm: cs:~openstack-charmers-next/ovn-chassis
   vault:
@@ -84,12 +84,12 @@ applications:
     charm: cs:~openstack-charmers-next/glance
     num_units: 1
     options:
-      openstack-origin: cloud:bionic-ussuri/proposed
+      openstack-origin: cloud:bionic-ussuri
   keystone:
     charm: cs:~openstack-charmers-next/keystone
     num_units: 1
     options:
-      openstack-origin: cloud:bionic-ussuri/proposed
+      openstack-origin: cloud:bionic-ussuri
   mysql:
     constraints: mem=3072M
     charm: cs:~openstack-charmers-next/percona-cluster
@@ -98,7 +98,7 @@ applications:
     charm: cs:~openstack-charmers-next/neutron-api
     num_units: 1
     options:
-      openstack-origin: cloud:bionic-ussuri/proposed
+      openstack-origin: cloud:bionic-ussuri
       debug: True
       flat-network-providers: physnet1
       enable-ml2-port-security: True
@@ -129,7 +129,7 @@ applications:
     charm: cs:~openstack-charmers-next/nova-cloud-controller
     num_units: 1
     options:
-      openstack-origin: cloud:bionic-ussuri/proposed
+      openstack-origin: cloud:bionic-ussuri
       debug: True
       network-manager: Neutron
   nova-compute:
@@ -137,7 +137,7 @@ applications:
     charm: cs:~openstack-charmers-next/nova-compute
     num_units: 2
     options:
-      openstack-origin: cloud:bionic-ussuri/proposed
+      openstack-origin: cloud:bionic-ussuri
       debug: True
   hacluster-octavia:
     charm: cs:~openstack-charmers-next/hacluster
@@ -148,7 +148,7 @@ applications:
     charm: ../../../octavia
     num_units: 3
     options:
-      openstack-origin: cloud:bionic-ussuri/proposed
+      openstack-origin: cloud:bionic-ussuri
       debug: True
       loadbalancer-topology: 'ACTIVE_STANDBY'
       vip: 'ADD YOUR VIP HERE'
@@ -172,7 +172,7 @@ applications:
     num_units: 1
     constraints: mem=1G
     options:
-      openstack-origin: cloud:bionic-ussuri/proposed
+      openstack-origin: cloud:bionic-ussuri
       debug: true
   vault:
     charm: cs:~openstack-charmers-next/vault
@@ -1,5 +1,6 @@
 charm_name: octavia
 gate_bundles:
+- bionic-rocky-ha
 - bionic-stein-ha
 - bionic-train-ha-ovn
 - bionic-train-ha
@@ -15,12 +16,12 @@ gate_bundles:
 smoke_bundles:
 - focal-ussuri-ha-ovn
 - focal-ussuri-ha
-dev_bundles:
-- bionic-rocky-ha
-- bionic-rocky-lxd
 comment: |
   The `bionic-rocky-lxd` bundle currently fails due to a bug in LXD.
   https://github.com/lxc/lxd/issues/4947
+dev_bundles:
+- bionic-rocky-ha
+- bionic-rocky-lxd
 target_deploy_status:
   octavia:
     workload-status: blocked
@@ -16,7 +16,7 @@ pytest-runner
 # layer-basic requires setuptools<42, zipp>=2.0.0 requires setuptools>42
 # LP: #1862186
 zipp < 2.0.0
+tenacity
 # cryptography 3.4 introduces a requirement for rust code in the module. As it has to be compiled
 # on the machine during install, this breaks installs. Instead pin to <3.4 until a solution can be
 # found that doesn't require compiling on the target machine.
@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import contextlib
 import mock
 import subprocess
 
@@ -247,6 +248,39 @@ class TestAPICrud(test_utils.PatchHelper):
         nc.update_port.assert_called_with('fake-port-uuid',
                                           {'port': {'admin_state_up': True}})
 
+    def test_is_hm_port_bound(self):
+        self.patch_object(api_crud, 'session_from_identity_service')
+        self.patch_object(api_crud, 'init_neutron_client')
+        self.patch_object(api_crud, 'lookup_hm_port')
+        self.lookup_hm_port.return_value = None
+        self.assertEquals(
+            api_crud.is_hm_port_bound('ids', 'fake-unit-name'), None)
+        self.lookup_hm_port.assert_called_once_with(
+            mock.ANY, 'fake-unit-name')
+        self.lookup_hm_port.return_value = {'binding:vif_type': 'nonfailure'}
+        self.assertTrue(api_crud.is_hm_port_bound('ids', 'fake-unit-name'))
+        self.lookup_hm_port.return_value = {
+            'binding:vif_type': 'binding_failed'}
+        self.assertFalse(api_crud.is_hm_port_bound('ids', 'fake-unit-name'))
+
+    def test_wait_for_hm_port_bound(self):
+        self.patch_object(api_crud.tenacity, 'Retrying')
+
+        @contextlib.contextmanager
+        def fake_context_manager():
+            # TODO: Replace with `contextlib.nullcontext()` once we have
+            # deprecated support for Python 3.4, 3.5 and 3.6
+            yield None
+
+        self.Retrying.return_value = [fake_context_manager()]
+        self.patch_object(api_crud, 'is_hm_port_bound')
+        self.is_hm_port_bound.return_value = True
+        self.assertTrue(api_crud.wait_for_hm_port_bound(
+            'ids', 'fake-unit-name'))
+        self.Retrying.side_effect = api_crud.tenacity.RetryError(None)
+        self.assertFalse(api_crud.wait_for_hm_port_bound(
+            'ids', 'fake-unit-name'))
+
     def test_setup_hm_port(self):
         self.patch('subprocess.check_output', 'check_output')
         self.patch('subprocess.check_call', 'check_call')
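As the TODO inside fake_context_manager notes, contextlib.nullcontext()
(available from Python 3.7) is a drop-in replacement for the hand-rolled
manager:

import contextlib

# equivalent of fake_context_manager() on Python 3.7 and later
with contextlib.nullcontext() as ctx:
    assert ctx is None  # behaves like ``yield None``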
@@ -260,6 +294,7 @@ class TestAPICrud(test_utils.PatchHelper):
             'id': port_uuid,
             'mac_address': port_mac_address,
             'admin_state_up': False,
+            'binding:vif_type': 'binding_failed',
             'status': 'DOWN',
         }
         e = subprocess.CalledProcessError(returncode=1, cmd=None)
@@ -149,12 +149,16 @@ class TestOctaviaHandlers(test_utils.PatchHelper):
         self.patch('charms.reactive.endpoint_from_flag', 'endpoint_from_flag')
         self.patch('charms.reactive.set_flag', 'set_flag')
         self.patch_object(handlers.api_crud, 'setup_hm_port')
+        self.patch_object(handlers.api_crud, 'wait_for_hm_port_bound')
+        self.wait_for_hm_port_bound = True
         handlers.setup_hm_port()
         self.setup_hm_port.assert_called_with(
             self.endpoint_from_flag(),
             self.octavia_charm,
             host_id=self.endpoint_from_flag().host())
-        self.set_flag.assert_called_once_with('config.changed')
+        self.set_flag.assert_has_calls([
+            mock.call('config.changed'),
+            mock.call('octavia.hm-port.available')])
         self.setup_hm_port.reset_mock()
         ovsdb_subordinate = mock.MagicMock()
         identity_service = mock.MagicMock()