diff --git a/README.md b/README.md
index 548c742..f8f4611 100644
--- a/README.md
+++ b/README.md
@@ -61,6 +61,26 @@ related services:
 
     juju add-relation keystone percona-cluster
 
+Network Space support
+---------------------
+
+This charm supports the use of Juju Network Spaces, allowing the charm to be bound to network space configurations managed directly by Juju. This is only supported with Juju 2.0 and above.
+
+You can ensure that database connections are bound to a specific network space by binding the appropriate interfaces:
+
+    juju deploy percona-cluster --bind "shared-db=internal-space"
+
+alternatively these can also be provided as part of a juju native bundle configuration:
+
+    percona-cluster:
+      charm: cs:xenial/percona-cluster
+      num_units: 1
+      bindings:
+        shared-db: internal-space
+
+**NOTE:** Spaces must be configured in the underlying provider prior to attempting to use them.
+
+**NOTE:** Existing deployments using the access-network configuration option will continue to function; this option is preferred over any network space binding provided if set.
 Limitiations
 ============
 
diff --git a/actions/backup b/actions/backup
new file mode 120000
index 0000000..405a394
--- /dev/null
+++ b/actions/backup
@@ -0,0 +1 @@
+actions.py
\ No newline at end of file
diff --git a/charmhelpers/contrib/network/ip.py b/charmhelpers/contrib/network/ip.py
index 4efe799..b9c7900 100644
--- a/charmhelpers/contrib/network/ip.py
+++ b/charmhelpers/contrib/network/ip.py
@@ -191,6 +191,15 @@ get_iface_for_address = partial(_get_for_address, key='iface')
 get_netmask_for_address = partial(_get_for_address, key='netmask')
 
 
+def resolve_network_cidr(ip_address):
+    '''
+    Resolves the full address cidr of an ip_address based on
+    configured network interfaces
+    '''
+    netmask = get_netmask_for_address(ip_address)
+    return str(netaddr.IPNetwork("%s/%s" % (ip_address, netmask)).cidr)
+
+
 def format_ipv6_addr(address):
     """If address is IPv6, wrap it in '[]' otherwise return None.
 
diff --git a/hooks/percona_hooks.py b/hooks/percona_hooks.py
index 1b18166..39ce058 100755
--- a/hooks/percona_hooks.py
+++ b/hooks/percona_hooks.py
@@ -24,6 +24,7 @@ from charmhelpers.core.hookenv import (
     INFO,
     WARNING,
     is_leader,
+    network_get_primary_address,
 )
 from charmhelpers.core.host import (
     service,
@@ -80,6 +81,7 @@ from charmhelpers.contrib.network.ip import (
     get_netmask_for_address,
     get_ipv6_addr,
     is_address_in_network,
+    resolve_network_cidr,
 )
 from charmhelpers.contrib.charmsupport import nrpe
 
@@ -365,18 +367,21 @@ def db_changed(relation_id=None, unit=None, admin=None):
         })
 
 
-def get_db_host(client_hostname):
+def get_db_host(client_hostname, interface='shared-db'):
     """Get address of local database host.
 
     If an access-network has been configured, expect selected address to be
     on that network. If none can be found, revert to primary address.
 
+    If network spaces are supported (Juju >= 2.0), use network-get to
+    retrieve the network binding for the interface.
+
     If vip(s) are configured, chooses first available.
     """
     vips = config('vip').split() if config('vip') else []
     access_network = config('access-network')
+    client_ip = get_host_ip(client_hostname)
     if access_network:
-        client_ip = get_host_ip(client_hostname)
         if is_address_in_network(access_network, client_ip):
             if is_clustered():
                 for vip in vips:
@@ -390,6 +395,22 @@ def get_db_host(client_hostname):
         else:
             log("Client address '%s' not in access-network '%s'" %
                 (client_ip, access_network), level=WARNING)
+    else:
+        try:
+            # NOTE(jamespage)
+            # Try to use network spaces to resolve binding for
+            # interface, and to resolve the VIP associated with
+            # the binding if provided.
+            interface_binding = network_get_primary_address(interface)
+            if is_clustered() and vips:
+                interface_cidr = resolve_network_cidr(interface_binding)
+                for vip in vips:
+                    if is_address_in_network(interface_cidr, vip):
+                        return vip
+            return interface_binding
+        except NotImplementedError:
+            # NOTE(jamespage): skip - fallback to previous behaviour
+            pass
 
     if is_clustered() and vips:
         return vips[0]  # NOTE on private network
diff --git a/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/tests/charmhelpers/contrib/openstack/amulet/deployment.py
index d2ede32..d21c9c7 100644
--- a/tests/charmhelpers/contrib/openstack/amulet/deployment.py
+++ b/tests/charmhelpers/contrib/openstack/amulet/deployment.py
@@ -126,7 +126,9 @@ class OpenStackAmuletDeployment(AmuletDeployment):
         # Charms which can not use openstack-origin, ie. many subordinates
         no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe',
                      'openvswitch-odl', 'neutron-api-odl', 'odl-controller',
-                     'cinder-backup']
+                     'cinder-backup', 'nexentaedge-data',
+                     'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw',
+                     'cinder-nexentaedge', 'nexentaedge-mgmt']
 
         if self.openstack:
             for svc in services:
diff --git a/unit_tests/test_percona_hooks.py b/unit_tests/test_percona_hooks.py
index df989cc..6f85666 100644
--- a/unit_tests/test_percona_hooks.py
+++ b/unit_tests/test_percona_hooks.py
@@ -22,12 +22,19 @@ TO_PATCH = ['log', 'config',
             'get_iface_for_address',
             'get_netmask_for_address',
             'is_bootstrapped',
-            'is_sufficient_peers']
+            'is_sufficient_peers',
+            'network_get_primary_address',
+            'resolve_network_cidr',
+            'unit_get',
+            'get_host_ip',
+            'is_clustered',
+            'get_ipv6_addr']
 
 
-class TestHaRelation(CharmTestCase):
+class TestHARelation(CharmTestCase):
     def setUp(self):
         CharmTestCase.setUp(self, hooks, TO_PATCH)
+        self.network_get_primary_address.side_effect = NotImplementedError
 
     @mock.patch('sys.exit')
     def test_relation_not_configured(self, exit_):
@@ -138,3 +145,45 @@ class TestHaRelation(CharmTestCase):
 
         call_args, call_kwargs = self.relation_set.call_args
         self.assertEqual(resource_params, call_kwargs['resource_params'])
+
+
+class TestHostResolution(CharmTestCase):
+    def setUp(self):
+        CharmTestCase.setUp(self, hooks, TO_PATCH)
+        self.network_get_primary_address.side_effect = NotImplementedError
+        self.is_clustered.return_value = False
+        self.config.side_effect = self.test_config.get
+        self.test_config.set('prefer-ipv6', False)
+
+    def test_get_db_host_defaults(self):
+        '''
+        Ensure that with nothing other than defaults private-address is used
+        '''
+        self.unit_get.return_value = 'mydbhost'
+        self.get_host_ip.return_value = '10.0.0.2'
+        self.assertEqual(hooks.get_db_host('myclient'), 'mydbhost')
+
+    def test_get_db_host_network_spaces(self):
+        '''
+        Ensure that if the shared-db relation is bound, its bound address
+        is used
+        '''
+        self.get_host_ip.return_value = '10.0.0.2'
+        self.network_get_primary_address.side_effect = None
+        self.network_get_primary_address.return_value = '192.168.20.2'
+        self.assertEqual(hooks.get_db_host('myclient'), '192.168.20.2')
+        self.network_get_primary_address.assert_called_with('shared-db')
+
+    def test_get_db_host_network_spaces_clustered(self):
+        '''
+        Ensure that if the shared-db relation is bound and the unit is
+        clustered, that the correct VIP is chosen
+        '''
+        self.get_host_ip.return_value = '10.0.0.2'
+        self.is_clustered.return_value = True
+        self.test_config.set('vip', '10.0.0.100 192.168.20.200')
+        self.network_get_primary_address.side_effect = None
+        self.network_get_primary_address.return_value = '192.168.20.2'
+        self.resolve_network_cidr.return_value = '192.168.20.2/24'
+        self.assertEqual(hooks.get_db_host('myclient'), '192.168.20.200')
+        self.network_get_primary_address.assert_called_with('shared-db')
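
The `resolve_network_cidr()` helper added to charmhelpers above normalises an address plus the netmask of the interface that carries it into the owning network's CIDR. Below is a minimal sketch of the netaddr call it builds on, with the netmask passed in directly rather than looked up via `get_netmask_for_address()`; the hard-coded /24 default is purely illustrative:

    import netaddr

    def resolve_network_cidr_sketch(ip_address, netmask='255.255.255.0'):
        # Combine the address with its netmask and normalise the result to
        # the network CIDR; the charmhelpers version looks the netmask up
        # from the unit's configured interfaces instead of taking an argument.
        return str(netaddr.IPNetwork("%s/%s" % (ip_address, netmask)).cidr)

    print(resolve_network_cidr_sketch('192.168.20.2'))  # -> 192.168.20.0/24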
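
The new network-space branch in `get_db_host()` returns a VIP only when one falls inside the CIDR of the bound address, and otherwise hands back the bound address itself; `test_get_db_host_network_spaces_clustered` exercises exactly that case. A self-contained sketch of the selection logic using only the standard library (function and parameter names here are illustrative, not the charm's API):

    import ipaddress

    def select_binding_address(binding_address, vips, prefix_len=24):
        # Derive the network the binding lives on (the charm uses
        # resolve_network_cidr for this) and prefer a VIP on that network.
        binding_net = ipaddress.ip_interface(
            '{}/{}'.format(binding_address, prefix_len)).network
        for vip in vips:
            if ipaddress.ip_address(vip) in binding_net:
                return vip
        # No VIP on the bound network: fall back to the bound address.
        return binding_address

    # Mirrors the clustered unit test above:
    print(select_binding_address('192.168.20.2',
                                 ['10.0.0.100', '192.168.20.200']))
    # -> 192.168.20.200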