Add gnocchi charm and tests
Add gnocchi charm Add charmcraft and metadata files Add relevant tox files Add unit tests and zaza tests
This commit is contained in:
3
.gitignore
vendored
3
.gitignore
vendored
@@ -7,3 +7,6 @@ __pycache__/
|
|||||||
*.py[cod]
|
*.py[cod]
|
||||||
.idea
|
.idea
|
||||||
.vscode/
|
.vscode/
|
||||||
|
*.swp
|
||||||
|
.stestr/
|
||||||
|
|
||||||
|
5
.gitreview
Normal file
5
.gitreview
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
[gerrit]
|
||||||
|
host=review.opendev.org
|
||||||
|
port=29418
|
||||||
|
project=openstack/charm-gnocchi-k8s.git
|
||||||
|
defaultbranch=main
|
3
.stestr.conf
Normal file
3
.stestr.conf
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
[DEFAULT]
|
||||||
|
test_path=./tests/unit
|
||||||
|
top_dir=./tests
|
11
.zuul.yaml
Normal file
11
.zuul.yaml
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
- project:
|
||||||
|
templates:
|
||||||
|
- openstack-python3-charm-jobs
|
||||||
|
- openstack-cover-jobs
|
||||||
|
- microk8s-func-test
|
||||||
|
vars:
|
||||||
|
charm_build_name: gnocchi-k8s
|
||||||
|
juju_channel: 3.2/stable
|
||||||
|
juju_classic_mode: false
|
||||||
|
microk8s_channel: 1.26-strict/stable
|
||||||
|
microk8s_classic_mode: false
|
@@ -10,17 +10,15 @@ Use links instead.
|
|||||||
|
|
||||||
# gnocchi-k8s
|
# gnocchi-k8s
|
||||||
|
|
||||||
Charmhub package name: operator-template
|
Charmhub package name: gnocchi-k8s
|
||||||
More information: https://charmhub.io/gnocchi-k8s
|
More information: https://charmhub.io/gnocchi-k8s
|
||||||
|
|
||||||
Describe your charm in one or two sentences.
|
Operator for OpenStack Gnocchi.
|
||||||
|
|
||||||
## Other resources
|
## Other resources
|
||||||
|
|
||||||
<!-- If your charm is documented somewhere else other than Charmhub, provide a link separately. -->
|
<!-- If your charm is documented somewhere else other than Charmhub, provide a link separately. -->
|
||||||
|
|
||||||
- [Read more](https://example.com)
|
|
||||||
|
|
||||||
- [Contributing](CONTRIBUTING.md) <!-- or link to other contribution documentation -->
|
- [Contributing](CONTRIBUTING.md) <!-- or link to other contribution documentation -->
|
||||||
|
|
||||||
- See the [Juju SDK documentation](https://juju.is/docs/sdk) for more information about developing and improving charms.
|
- See the [Juju SDK documentation](https://juju.is/docs/sdk) for more information about developing and improving charms.
|
||||||
|
@@ -7,11 +7,22 @@ bases:
|
|||||||
- name: "ubuntu"
|
- name: "ubuntu"
|
||||||
channel: "22.04"
|
channel: "22.04"
|
||||||
parts:
|
parts:
|
||||||
|
update-certificates:
|
||||||
|
plugin: nil
|
||||||
|
override-build: |
|
||||||
|
apt update
|
||||||
|
apt install -y ca-certificates
|
||||||
|
update-ca-certificates
|
||||||
|
|
||||||
charm:
|
charm:
|
||||||
|
after: [update-certificates]
|
||||||
build-packages:
|
build-packages:
|
||||||
- git
|
- git
|
||||||
- libffi-dev
|
- libffi-dev
|
||||||
- libssl-dev
|
- libssl-dev
|
||||||
|
- rustc
|
||||||
|
- cargo
|
||||||
|
- pkg-config
|
||||||
charm-binary-python-packages:
|
charm-binary-python-packages:
|
||||||
- cryptography
|
- cryptography
|
||||||
- jsonschema
|
- jsonschema
|
||||||
|
230
config.yaml
230
config.yaml
@@ -25,3 +25,233 @@ options:
|
|||||||
default: RegionOne
|
default: RegionOne
|
||||||
description: Space delimited list of OpenStack regions
|
description: Space delimited list of OpenStack regions
|
||||||
type: string
|
type: string
|
||||||
|
ceph-osd-replication-count:
|
||||||
|
default: 3
|
||||||
|
type: int
|
||||||
|
description: |
|
||||||
|
This value dictates the number of replicas ceph must make of any
|
||||||
|
object it stores within the cinder rbd pool. Of course, this only
|
||||||
|
applies if using Ceph as a backend store. Note that once the cinder
|
||||||
|
rbd pool has been created, changing this value will not have any
|
||||||
|
effect (although it can be changed in ceph by manually configuring
|
||||||
|
your ceph cluster).
|
||||||
|
ceph-pool-weight:
|
||||||
|
type: int
|
||||||
|
default: 40
|
||||||
|
description: |
|
||||||
|
Defines a relative weighting of the pool as a percentage of the total
|
||||||
|
amount of data in the Ceph cluster. This effectively weights the number
|
||||||
|
of placement groups for the pool created to be appropriately portioned
|
||||||
|
to the amount of data expected. For example, if the ephemeral volumes
|
||||||
|
for the OpenStack compute instances are expected to take up 20% of the
|
||||||
|
overall configuration then this value would be specified as 20. Note -
|
||||||
|
it is important to choose an appropriate value for the pool weight as
|
||||||
|
this directly affects the number of placement groups which will be
|
||||||
|
created for the pool. The number of placement groups for a pool can
|
||||||
|
only be increased, never decreased - so it is important to identify the
|
||||||
|
percent of data that will likely reside in the pool.
|
||||||
|
volume-backend-name:
|
||||||
|
default:
|
||||||
|
type: string
|
||||||
|
description: |
|
||||||
|
Volume backend name for the backend. The default value is the
|
||||||
|
application name in the Juju model, e.g. "cinder-ceph-mybackend"
|
||||||
|
if it's deployed as `juju deploy cinder-ceph cinder-ceph-mybackend`.
|
||||||
|
A common backend name can be set to multiple backends with the
|
||||||
|
same characters so that those can be treated as a single virtual
|
||||||
|
backend associated with a single volume type.
|
||||||
|
backend-availability-zone:
|
||||||
|
default:
|
||||||
|
type: string
|
||||||
|
description: |
|
||||||
|
Availability zone name of this volume backend. If set, it will
|
||||||
|
override the default availability zone. Supported for Pike or
|
||||||
|
newer releases.
|
||||||
|
restrict-ceph-pools:
|
||||||
|
default: False
|
||||||
|
type: boolean
|
||||||
|
description: |
|
||||||
|
Optionally restrict Ceph key permissions to access pools as required.
|
||||||
|
rbd-pool-name:
|
||||||
|
default:
|
||||||
|
type: string
|
||||||
|
description: |
|
||||||
|
Optionally specify an existing rbd pool that cinder should map to.
|
||||||
|
rbd-flatten-volume-from-snapshot:
|
||||||
|
default:
|
||||||
|
type: boolean
|
||||||
|
default: False
|
||||||
|
description: |
|
||||||
|
Flatten volumes created from snapshots to remove dependency from
|
||||||
|
volume to snapshot. Supported on Queens+
|
||||||
|
rbd-mirroring-mode:
|
||||||
|
type: string
|
||||||
|
default: pool
|
||||||
|
description: |
|
||||||
|
The RBD mirroring mode used for the Ceph pool. This option is only used
|
||||||
|
with 'replicated' pool type, as it's not supported for 'erasure-coded'
|
||||||
|
pool type - valid values: 'pool' and 'image'
|
||||||
|
pool-type:
|
||||||
|
type: string
|
||||||
|
default: replicated
|
||||||
|
description: |
|
||||||
|
Ceph pool type to use for storage - valid values include ‘replicated’
|
||||||
|
and ‘erasure-coded’.
|
||||||
|
ec-profile-name:
|
||||||
|
type: string
|
||||||
|
default:
|
||||||
|
description: |
|
||||||
|
Name for the EC profile to be created for the EC pools. If not defined
|
||||||
|
a profile name will be generated based on the name of the pool used by
|
||||||
|
the application.
|
||||||
|
ec-rbd-metadata-pool:
|
||||||
|
type: string
|
||||||
|
default:
|
||||||
|
description: |
|
||||||
|
Name of the metadata pool to be created (for RBD use-cases). If not
|
||||||
|
defined a metadata pool name will be generated based on the name of
|
||||||
|
the data pool used by the application. The metadata pool is always
|
||||||
|
replicated, not erasure coded.
|
||||||
|
ec-profile-k:
|
||||||
|
type: int
|
||||||
|
default: 1
|
||||||
|
description: |
|
||||||
|
Number of data chunks that will be used for EC data pool. K+M factors
|
||||||
|
should never be greater than the number of available zones (or hosts)
|
||||||
|
for balancing.
|
||||||
|
ec-profile-m:
|
||||||
|
type: int
|
||||||
|
default: 2
|
||||||
|
description: |
|
||||||
|
Number of coding chunks that will be used for EC data pool. K+M factors
|
||||||
|
should never be greater than the number of available zones (or hosts)
|
||||||
|
for balancing.
|
||||||
|
ec-profile-locality:
|
||||||
|
type: int
|
||||||
|
default:
|
||||||
|
description: |
|
||||||
|
(lrc plugin - l) Group the coding and data chunks into sets of size l.
|
||||||
|
For instance, for k=4 and m=2, when l=3 two groups of three are created.
|
||||||
|
Each set can be recovered without reading chunks from another set. Note
|
||||||
|
that using the lrc plugin does incur more raw storage usage than isa or
|
||||||
|
jerasure in order to reduce the cost of recovery operations.
|
||||||
|
ec-profile-crush-locality:
|
||||||
|
type: string
|
||||||
|
default:
|
||||||
|
description: |
|
||||||
|
(lrc plugin) The type of the crush bucket in which each set of chunks
|
||||||
|
defined by l will be stored. For instance, if it is set to rack, each
|
||||||
|
group of l chunks will be placed in a different rack. It is used to
|
||||||
|
create a CRUSH rule step such as step choose rack. If it is not set,
|
||||||
|
no such grouping is done.
|
||||||
|
ec-profile-durability-estimator:
|
||||||
|
type: int
|
||||||
|
default:
|
||||||
|
description: |
|
||||||
|
(shec plugin - c) The number of parity chunks each of which includes
|
||||||
|
each data chunk in its calculation range. The number is used as a
|
||||||
|
durability estimator. For instance, if c=2, 2 OSDs can be down
|
||||||
|
without losing data.
|
||||||
|
ec-profile-helper-chunks:
|
||||||
|
type: int
|
||||||
|
default:
|
||||||
|
description: |
|
||||||
|
(clay plugin - d) Number of OSDs requested to send data during
|
||||||
|
recovery of a single chunk. d needs to be chosen such that
|
||||||
|
k+1 <= d <= k+m-1. Larger the d, the better the savings.
|
||||||
|
ec-profile-scalar-mds:
|
||||||
|
type: string
|
||||||
|
default:
|
||||||
|
description: |
|
||||||
|
(clay plugin) specifies the plugin that is used as a building
|
||||||
|
block in the layered construction. It can be one of jerasure,
|
||||||
|
isa, shec (defaults to jerasure).
|
||||||
|
ec-profile-plugin:
|
||||||
|
type: string
|
||||||
|
default: jerasure
|
||||||
|
description: |
|
||||||
|
EC plugin to use for this applications pool. The following list of
|
||||||
|
plugins acceptable - jerasure, lrc, isa, shec, clay.
|
||||||
|
ec-profile-technique:
|
||||||
|
type: string
|
||||||
|
default:
|
||||||
|
description: |
|
||||||
|
EC profile technique used for this applications pool - will be
|
||||||
|
validated based on the plugin configured via ec-profile-plugin.
|
||||||
|
Supported techniques are ‘reed_sol_van’, ‘reed_sol_r6_op’,
|
||||||
|
‘cauchy_orig’, ‘cauchy_good’, ‘liber8tion’ for jerasure,
|
||||||
|
‘reed_sol_van’, ‘cauchy’ for isa and ‘single’, ‘multiple’
|
||||||
|
for shec.
|
||||||
|
ec-profile-device-class:
|
||||||
|
type: string
|
||||||
|
default:
|
||||||
|
description: |
|
||||||
|
Device class from CRUSH map to use for placement groups for
|
||||||
|
erasure profile - valid values: ssd, hdd or nvme (or leave
|
||||||
|
unset to not use a device class).
|
||||||
|
bluestore-compression-algorithm:
|
||||||
|
type: string
|
||||||
|
default:
|
||||||
|
description: |
|
||||||
|
Compressor to use (if any) for pools requested by this charm.
|
||||||
|
.
|
||||||
|
NOTE: The ceph-osd charm sets a global default for this value (defaults
|
||||||
|
to 'lz4' unless configured by the end user) which will be used unless
|
||||||
|
specified for individual pools.
|
||||||
|
bluestore-compression-mode:
|
||||||
|
type: string
|
||||||
|
default:
|
||||||
|
description: |
|
||||||
|
Policy for using compression on pools requested by this charm.
|
||||||
|
.
|
||||||
|
'none' means never use compression.
|
||||||
|
'passive' means use compression when clients hint that data is
|
||||||
|
compressible.
|
||||||
|
'aggressive' means use compression unless clients hint that
|
||||||
|
data is not compressible.
|
||||||
|
'force' means use compression under all circumstances even if the clients
|
||||||
|
hint that the data is not compressible.
|
||||||
|
bluestore-compression-required-ratio:
|
||||||
|
type: float
|
||||||
|
default:
|
||||||
|
description: |
|
||||||
|
The ratio of the size of the data chunk after compression relative to the
|
||||||
|
original size must be at least this small in order to store the
|
||||||
|
compressed version on pools requested by this charm.
|
||||||
|
bluestore-compression-min-blob-size:
|
||||||
|
type: int
|
||||||
|
default:
|
||||||
|
description: |
|
||||||
|
Chunks smaller than this are never compressed on pools requested by
|
||||||
|
this charm.
|
||||||
|
bluestore-compression-min-blob-size-hdd:
|
||||||
|
type: int
|
||||||
|
default:
|
||||||
|
description: |
|
||||||
|
Value of bluestore compression min blob size for rotational media on
|
||||||
|
pools requested by this charm.
|
||||||
|
bluestore-compression-min-blob-size-ssd:
|
||||||
|
type: int
|
||||||
|
default:
|
||||||
|
description: |
|
||||||
|
Value of bluestore compression min blob size for solid state media on
|
||||||
|
pools requested by this charm.
|
||||||
|
bluestore-compression-max-blob-size:
|
||||||
|
type: int
|
||||||
|
default:
|
||||||
|
description: |
|
||||||
|
Chunks larger than this are broken into smaller blobs sizing bluestore
|
||||||
|
compression max blob size before being compressed on pools requested by
|
||||||
|
this charm.
|
||||||
|
bluestore-compression-max-blob-size-hdd:
|
||||||
|
type: int
|
||||||
|
default:
|
||||||
|
description: |
|
||||||
|
Value of bluestore compression max blob size for rotational media on
|
||||||
|
pools requested by this charm.
|
||||||
|
bluestore-compression-max-blob-size-ssd:
|
||||||
|
type: int
|
||||||
|
default:
|
||||||
|
description: |
|
||||||
|
Value of bluestore compression max blob size for solid state media on
|
||||||
|
pools requested by this charm.
|
||||||
|
7
fetch-libs.sh
Executable file
7
fetch-libs.sh
Executable file
@@ -0,0 +1,7 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
echo "INFO: Fetching libs from charmhub."
|
||||||
|
charmcraft fetch-lib charms.data_platform_libs.v0.database_requires
|
||||||
|
charmcraft fetch-lib charms.keystone_k8s.v1.identity_service
|
||||||
|
charmcraft fetch-lib charms.rabbitmq_k8s.v0.rabbitmq
|
||||||
|
charmcraft fetch-lib charms.traefik_k8s.v1.ingress
|
537
lib/charms/data_platform_libs/v0/database_requires.py
Normal file
537
lib/charms/data_platform_libs/v0/database_requires.py
Normal file
@@ -0,0 +1,537 @@
|
|||||||
|
# Copyright 2023 Canonical Ltd.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
r"""[DEPRECATED] Relation 'requires' side abstraction for database relation.
|
||||||
|
|
||||||
|
This library is a uniform interface to a selection of common database
|
||||||
|
metadata, with added custom events that add convenience to database management,
|
||||||
|
and methods to consume the application related data.
|
||||||
|
|
||||||
|
Following an example of using the DatabaseCreatedEvent, in the context of the
|
||||||
|
application charm code:
|
||||||
|
|
||||||
|
```python
|
||||||
|
|
||||||
|
from charms.data_platform_libs.v0.database_requires import (
|
||||||
|
DatabaseCreatedEvent,
|
||||||
|
DatabaseRequires,
|
||||||
|
)
|
||||||
|
|
||||||
|
class ApplicationCharm(CharmBase):
|
||||||
|
# Application charm that connects to database charms.
|
||||||
|
|
||||||
|
def __init__(self, *args):
|
||||||
|
super().__init__(*args)
|
||||||
|
|
||||||
|
# Charm events defined in the database requires charm library.
|
||||||
|
self.database = DatabaseRequires(self, relation_name="database", database_name="database")
|
||||||
|
self.framework.observe(self.database.on.database_created, self._on_database_created)
|
||||||
|
|
||||||
|
def _on_database_created(self, event: DatabaseCreatedEvent) -> None:
|
||||||
|
# Handle the created database
|
||||||
|
|
||||||
|
# Create configuration file for app
|
||||||
|
config_file = self._render_app_config_file(
|
||||||
|
event.username,
|
||||||
|
event.password,
|
||||||
|
event.endpoints,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Start application with rendered configuration
|
||||||
|
self._start_application(config_file)
|
||||||
|
|
||||||
|
# Set active status
|
||||||
|
self.unit.status = ActiveStatus("received database credentials")
|
||||||
|
```
|
||||||
|
|
||||||
|
As shown above, the library provides some custom events to handle specific situations,
|
||||||
|
which are listed below:
|
||||||
|
|
||||||
|
— database_created: event emitted when the requested database is created.
|
||||||
|
— endpoints_changed: event emitted when the read/write endpoints of the database have changed.
|
||||||
|
— read_only_endpoints_changed: event emitted when the read-only endpoints of the database
|
||||||
|
have changed. Event is not triggered if read/write endpoints changed too.
|
||||||
|
|
||||||
|
If it is needed to connect multiple database clusters to the same relation endpoint
|
||||||
|
the application charm can implement the same code as if it would connect to only
|
||||||
|
one database cluster (like the above code example).
|
||||||
|
|
||||||
|
To differentiate multiple clusters connected to the same relation endpoint
|
||||||
|
the application charm can use the name of the remote application:
|
||||||
|
|
||||||
|
```python
|
||||||
|
|
||||||
|
def _on_database_created(self, event: DatabaseCreatedEvent) -> None:
|
||||||
|
# Get the remote app name of the cluster that triggered this event
|
||||||
|
cluster = event.relation.app.name
|
||||||
|
```
|
||||||
|
|
||||||
|
It is also possible to provide an alias for each different database cluster/relation.
|
||||||
|
|
||||||
|
So, it is possible to differentiate the clusters in two ways.
|
||||||
|
The first is to use the remote application name, i.e., `event.relation.app.name`, as above.
|
||||||
|
|
||||||
|
The second way is to use different event handlers to handle each cluster events.
|
||||||
|
The implementation would be something like the following code:
|
||||||
|
|
||||||
|
```python
|
||||||
|
|
||||||
|
from charms.data_platform_libs.v0.database_requires import (
|
||||||
|
DatabaseCreatedEvent,
|
||||||
|
DatabaseRequires,
|
||||||
|
)
|
||||||
|
|
||||||
|
class ApplicationCharm(CharmBase):
|
||||||
|
# Application charm that connects to database charms.
|
||||||
|
|
||||||
|
def __init__(self, *args):
|
||||||
|
super().__init__(*args)
|
||||||
|
|
||||||
|
# Define the cluster aliases and one handler for each cluster database created event.
|
||||||
|
self.database = DatabaseRequires(
|
||||||
|
self,
|
||||||
|
relation_name="database",
|
||||||
|
database_name="database",
|
||||||
|
relations_aliases = ["cluster1", "cluster2"],
|
||||||
|
)
|
||||||
|
self.framework.observe(
|
||||||
|
self.database.on.cluster1_database_created, self._on_cluster1_database_created
|
||||||
|
)
|
||||||
|
self.framework.observe(
|
||||||
|
self.database.on.cluster2_database_created, self._on_cluster2_database_created
|
||||||
|
)
|
||||||
|
|
||||||
|
def _on_cluster1_database_created(self, event: DatabaseCreatedEvent) -> None:
|
||||||
|
# Handle the created database on the cluster named cluster1
|
||||||
|
|
||||||
|
# Create configuration file for app
|
||||||
|
config_file = self._render_app_config_file(
|
||||||
|
event.username,
|
||||||
|
event.password,
|
||||||
|
event.endpoints,
|
||||||
|
)
|
||||||
|
...
|
||||||
|
|
||||||
|
def _on_cluster2_database_created(self, event: DatabaseCreatedEvent) -> None:
|
||||||
|
# Handle the created database on the cluster named cluster2
|
||||||
|
|
||||||
|
# Create configuration file for app
|
||||||
|
config_file = self._render_app_config_file(
|
||||||
|
event.username,
|
||||||
|
event.password,
|
||||||
|
event.endpoints,
|
||||||
|
)
|
||||||
|
...
|
||||||
|
|
||||||
|
```
|
||||||
|
"""
|
||||||
|
|
||||||
|
import json
|
||||||
|
import logging
|
||||||
|
from collections import namedtuple
|
||||||
|
from datetime import datetime
|
||||||
|
from typing import List, Optional
|
||||||
|
|
||||||
|
from ops.charm import (
|
||||||
|
CharmEvents,
|
||||||
|
RelationChangedEvent,
|
||||||
|
RelationEvent,
|
||||||
|
RelationJoinedEvent,
|
||||||
|
)
|
||||||
|
from ops.framework import EventSource, Object
|
||||||
|
from ops.model import Relation
|
||||||
|
|
||||||
|
# The unique Charmhub library identifier, never change it
|
||||||
|
LIBID = "0241e088ffa9440fb4e3126349b2fb62"
|
||||||
|
|
||||||
|
# Increment this major API version when introducing breaking changes
|
||||||
|
LIBAPI = 0
|
||||||
|
|
||||||
|
# Increment this PATCH version before using `charmcraft publish-lib` or reset
|
||||||
|
# to 0 if you are raising the major API version.
|
||||||
|
LIBPATCH = 6
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class DatabaseEvent(RelationEvent):
|
||||||
|
"""Base class for database events."""
|
||||||
|
|
||||||
|
@property
|
||||||
|
def endpoints(self) -> Optional[str]:
|
||||||
|
"""Returns a comma separated list of read/write endpoints."""
|
||||||
|
if not self.relation.app:
|
||||||
|
return None
|
||||||
|
|
||||||
|
return self.relation.data[self.relation.app].get("endpoints")
|
||||||
|
|
||||||
|
@property
|
||||||
|
def password(self) -> Optional[str]:
|
||||||
|
"""Returns the password for the created user."""
|
||||||
|
if not self.relation.app:
|
||||||
|
return None
|
||||||
|
|
||||||
|
return self.relation.data[self.relation.app].get("password")
|
||||||
|
|
||||||
|
@property
|
||||||
|
def read_only_endpoints(self) -> Optional[str]:
|
||||||
|
"""Returns a comma separated list of read only endpoints."""
|
||||||
|
if not self.relation.app:
|
||||||
|
return None
|
||||||
|
|
||||||
|
return self.relation.data[self.relation.app].get("read-only-endpoints")
|
||||||
|
|
||||||
|
@property
|
||||||
|
def replset(self) -> Optional[str]:
|
||||||
|
"""Returns the replicaset name.
|
||||||
|
|
||||||
|
MongoDB only.
|
||||||
|
"""
|
||||||
|
if not self.relation.app:
|
||||||
|
return None
|
||||||
|
|
||||||
|
return self.relation.data[self.relation.app].get("replset")
|
||||||
|
|
||||||
|
@property
|
||||||
|
def tls(self) -> Optional[str]:
|
||||||
|
"""Returns whether TLS is configured."""
|
||||||
|
if not self.relation.app:
|
||||||
|
return None
|
||||||
|
|
||||||
|
return self.relation.data[self.relation.app].get("tls")
|
||||||
|
|
||||||
|
@property
|
||||||
|
def tls_ca(self) -> Optional[str]:
|
||||||
|
"""Returns TLS CA."""
|
||||||
|
if not self.relation.app:
|
||||||
|
return None
|
||||||
|
|
||||||
|
return self.relation.data[self.relation.app].get("tls-ca")
|
||||||
|
|
||||||
|
@property
|
||||||
|
def uris(self) -> Optional[str]:
|
||||||
|
"""Returns the connection URIs.
|
||||||
|
|
||||||
|
MongoDB, Redis, OpenSearch and Kafka only.
|
||||||
|
"""
|
||||||
|
if not self.relation.app:
|
||||||
|
return None
|
||||||
|
|
||||||
|
return self.relation.data[self.relation.app].get("uris")
|
||||||
|
|
||||||
|
@property
|
||||||
|
def username(self) -> Optional[str]:
|
||||||
|
"""Returns the created username."""
|
||||||
|
if not self.relation.app:
|
||||||
|
return None
|
||||||
|
|
||||||
|
return self.relation.data[self.relation.app].get("username")
|
||||||
|
|
||||||
|
@property
|
||||||
|
def version(self) -> Optional[str]:
|
||||||
|
"""Returns the version of the database.
|
||||||
|
|
||||||
|
Version as informed by the database daemon.
|
||||||
|
"""
|
||||||
|
if not self.relation.app:
|
||||||
|
return None
|
||||||
|
|
||||||
|
return self.relation.data[self.relation.app].get("version")
|
||||||
|
|
||||||
|
|
||||||
|
class DatabaseCreatedEvent(DatabaseEvent):
|
||||||
|
"""Event emitted when a new database is created for use on this relation."""
|
||||||
|
|
||||||
|
|
||||||
|
class DatabaseEndpointsChangedEvent(DatabaseEvent):
|
||||||
|
"""Event emitted when the read/write endpoints are changed."""
|
||||||
|
|
||||||
|
|
||||||
|
class DatabaseReadOnlyEndpointsChangedEvent(DatabaseEvent):
|
||||||
|
"""Event emitted when the read only endpoints are changed."""
|
||||||
|
|
||||||
|
|
||||||
|
class DatabaseEvents(CharmEvents):
|
||||||
|
"""Database events.
|
||||||
|
|
||||||
|
This class defines the events that the database can emit.
|
||||||
|
"""
|
||||||
|
|
||||||
|
database_created = EventSource(DatabaseCreatedEvent)
|
||||||
|
endpoints_changed = EventSource(DatabaseEndpointsChangedEvent)
|
||||||
|
read_only_endpoints_changed = EventSource(DatabaseReadOnlyEndpointsChangedEvent)
|
||||||
|
|
||||||
|
|
||||||
|
Diff = namedtuple("Diff", "added changed deleted")
|
||||||
|
Diff.__doc__ = """
|
||||||
|
A tuple for storing the diff between two data mappings.
|
||||||
|
|
||||||
|
— added — keys that were added.
|
||||||
|
— changed — keys that still exist but have new values.
|
||||||
|
— deleted — keys that were deleted.
|
||||||
|
"""
|
||||||
|
|
||||||
|
|
||||||
|
class DatabaseRequires(Object):
|
||||||
|
"""Requires-side of the database relation."""
|
||||||
|
|
||||||
|
on = DatabaseEvents() # pyright: ignore [reportGeneralTypeIssues]
|
||||||
|
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
charm,
|
||||||
|
relation_name: str,
|
||||||
|
database_name: str,
|
||||||
|
extra_user_roles: Optional[str] = None,
|
||||||
|
relations_aliases: Optional[List[str]] = None,
|
||||||
|
):
|
||||||
|
"""Manager of database client relations."""
|
||||||
|
super().__init__(charm, relation_name)
|
||||||
|
self.charm = charm
|
||||||
|
self.database = database_name
|
||||||
|
self.extra_user_roles = extra_user_roles
|
||||||
|
self.local_app = self.charm.model.app
|
||||||
|
self.local_unit = self.charm.unit
|
||||||
|
self.relation_name = relation_name
|
||||||
|
self.relations_aliases = relations_aliases
|
||||||
|
self.framework.observe(
|
||||||
|
self.charm.on[relation_name].relation_joined, self._on_relation_joined_event
|
||||||
|
)
|
||||||
|
self.framework.observe(
|
||||||
|
self.charm.on[relation_name].relation_changed, self._on_relation_changed_event
|
||||||
|
)
|
||||||
|
|
||||||
|
# Define custom event names for each alias.
|
||||||
|
if relations_aliases:
|
||||||
|
# Ensure the number of aliases does not exceed the maximum
|
||||||
|
# of connections allowed in the specific relation.
|
||||||
|
relation_connection_limit = self.charm.meta.requires[relation_name].limit
|
||||||
|
if len(relations_aliases) != relation_connection_limit:
|
||||||
|
raise ValueError(
|
||||||
|
f"The number of aliases must match the maximum number of connections allowed in the relation. "
|
||||||
|
f"Expected {relation_connection_limit}, got {len(relations_aliases)}"
|
||||||
|
)
|
||||||
|
|
||||||
|
for relation_alias in relations_aliases:
|
||||||
|
self.on.define_event(f"{relation_alias}_database_created", DatabaseCreatedEvent)
|
||||||
|
self.on.define_event(
|
||||||
|
f"{relation_alias}_endpoints_changed", DatabaseEndpointsChangedEvent
|
||||||
|
)
|
||||||
|
self.on.define_event(
|
||||||
|
f"{relation_alias}_read_only_endpoints_changed",
|
||||||
|
DatabaseReadOnlyEndpointsChangedEvent,
|
||||||
|
)
|
||||||
|
|
||||||
|
def _assign_relation_alias(self, relation_id: int) -> None:
|
||||||
|
"""Assigns an alias to a relation.
|
||||||
|
|
||||||
|
This function writes in the unit data bag.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
relation_id: the identifier for a particular relation.
|
||||||
|
"""
|
||||||
|
# If no aliases were provided, return immediately.
|
||||||
|
if not self.relations_aliases:
|
||||||
|
return
|
||||||
|
|
||||||
|
# Return if an alias was already assigned to this relation
|
||||||
|
# (like when there are more than one unit joining the relation).
|
||||||
|
if (
|
||||||
|
self.charm.model.get_relation(self.relation_name, relation_id)
|
||||||
|
.data[self.local_unit]
|
||||||
|
.get("alias")
|
||||||
|
):
|
||||||
|
return
|
||||||
|
|
||||||
|
# Retrieve the available aliases (the ones that weren't assigned to any relation).
|
||||||
|
available_aliases = self.relations_aliases[:]
|
||||||
|
for relation in self.charm.model.relations[self.relation_name]:
|
||||||
|
alias = relation.data[self.local_unit].get("alias")
|
||||||
|
if alias:
|
||||||
|
logger.debug("Alias %s was already assigned to relation %d", alias, relation.id)
|
||||||
|
available_aliases.remove(alias)
|
||||||
|
|
||||||
|
# Set the alias in the unit relation databag of the specific relation.
|
||||||
|
relation = self.charm.model.get_relation(self.relation_name, relation_id)
|
||||||
|
relation.data[self.local_unit].update({"alias": available_aliases[0]})
|
||||||
|
|
||||||
|
def _diff(self, event: RelationChangedEvent) -> Diff:
|
||||||
|
"""Retrieves the diff of the data in the relation changed databag.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
event: relation changed event.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
a Diff instance containing the added, deleted and changed
|
||||||
|
keys from the event relation databag.
|
||||||
|
"""
|
||||||
|
# Retrieve the old data from the data key in the local unit relation databag.
|
||||||
|
old_data = json.loads(event.relation.data[self.local_unit].get("data", "{}"))
|
||||||
|
# Retrieve the new data from the event relation databag.
|
||||||
|
new_data = (
|
||||||
|
{key: value for key, value in event.relation.data[event.app].items() if key != "data"}
|
||||||
|
if event.app
|
||||||
|
else {}
|
||||||
|
)
|
||||||
|
|
||||||
|
# These are the keys that were added to the databag and triggered this event.
|
||||||
|
added = new_data.keys() - old_data.keys()
|
||||||
|
# These are the keys that were removed from the databag and triggered this event.
|
||||||
|
deleted = old_data.keys() - new_data.keys()
|
||||||
|
# These are the keys that already existed in the databag,
|
||||||
|
# but had their values changed.
|
||||||
|
changed = {
|
||||||
|
key for key in old_data.keys() & new_data.keys() if old_data[key] != new_data[key]
|
||||||
|
}
|
||||||
|
|
||||||
|
# TODO: evaluate the possibility of losing the diff if some error
|
||||||
|
# happens in the charm before the diff is completely checked (DPE-412).
|
||||||
|
# Convert the new_data to a serializable format and save it for a next diff check.
|
||||||
|
event.relation.data[self.local_unit].update({"data": json.dumps(new_data)})
|
||||||
|
|
||||||
|
# Return the diff with all possible changes.
|
||||||
|
return Diff(added, changed, deleted)
|
||||||
|
|
||||||
|
def _emit_aliased_event(self, event: RelationChangedEvent, event_name: str) -> None:
|
||||||
|
"""Emit an aliased event to a particular relation if it has an alias.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
event: the relation changed event that was received.
|
||||||
|
event_name: the name of the event to emit.
|
||||||
|
"""
|
||||||
|
alias = self._get_relation_alias(event.relation.id)
|
||||||
|
if alias:
|
||||||
|
getattr(self.on, f"{alias}_{event_name}").emit(
|
||||||
|
event.relation, app=event.app, unit=event.unit
|
||||||
|
)
|
||||||
|
|
||||||
|
def _get_relation_alias(self, relation_id: int) -> Optional[str]:
|
||||||
|
"""Returns the relation alias.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
relation_id: the identifier for a particular relation.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
the relation alias or None if the relation was not found.
|
||||||
|
"""
|
||||||
|
for relation in self.charm.model.relations[self.relation_name]:
|
||||||
|
if relation.id == relation_id:
|
||||||
|
return relation.data[self.local_unit].get("alias")
|
||||||
|
return None
|
||||||
|
|
||||||
|
def fetch_relation_data(self) -> dict:
|
||||||
|
"""Retrieves data from relation.
|
||||||
|
|
||||||
|
This function can be used to retrieve data from a relation
|
||||||
|
in the charm code when outside an event callback.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
a dict of the values stored in the relation data bag
|
||||||
|
for all relation instances (indexed by the relation ID).
|
||||||
|
"""
|
||||||
|
data = {}
|
||||||
|
for relation in self.relations:
|
||||||
|
data[relation.id] = (
|
||||||
|
{key: value for key, value in relation.data[relation.app].items() if key != "data"}
|
||||||
|
if relation.app
|
||||||
|
else {}
|
||||||
|
)
|
||||||
|
return data
|
||||||
|
|
||||||
|
def _update_relation_data(self, relation_id: int, data: dict) -> None:
|
||||||
|
"""Updates a set of key-value pairs in the relation.
|
||||||
|
|
||||||
|
This function writes in the application data bag, therefore,
|
||||||
|
only the leader unit can call it.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
relation_id: the identifier for a particular relation.
|
||||||
|
data: dict containing the key-value pairs
|
||||||
|
that should be updated in the relation.
|
||||||
|
"""
|
||||||
|
if self.local_unit.is_leader():
|
||||||
|
relation = self.charm.model.get_relation(self.relation_name, relation_id)
|
||||||
|
relation.data[self.local_app].update(data)
|
||||||
|
|
||||||
|
def _on_relation_joined_event(self, event: RelationJoinedEvent) -> None:
|
||||||
|
"""Event emitted when the application joins the database relation."""
|
||||||
|
# If relations aliases were provided, assign one to the relation.
|
||||||
|
self._assign_relation_alias(event.relation.id)
|
||||||
|
|
||||||
|
# Sets both database and extra user roles in the relation
|
||||||
|
# if the roles are provided. Otherwise, sets only the database.
|
||||||
|
if self.extra_user_roles:
|
||||||
|
self._update_relation_data(
|
||||||
|
event.relation.id,
|
||||||
|
{
|
||||||
|
"database": self.database,
|
||||||
|
"extra-user-roles": self.extra_user_roles,
|
||||||
|
},
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
self._update_relation_data(event.relation.id, {"database": self.database})
|
||||||
|
|
||||||
|
def _on_relation_changed_event(self, event: RelationChangedEvent) -> None:
|
||||||
|
"""Event emitted when the database relation has changed."""
|
||||||
|
# Check which data has changed to emit customs events.
|
||||||
|
diff = self._diff(event)
|
||||||
|
|
||||||
|
# Check if the database is created
|
||||||
|
# (the database charm shared the credentials).
|
||||||
|
if "username" in diff.added and "password" in diff.added:
|
||||||
|
# Emit the default event (the one without an alias).
|
||||||
|
logger.info("database created at %s", datetime.now())
|
||||||
|
getattr(self.on, "database_created").emit(
|
||||||
|
event.relation, app=event.app, unit=event.unit
|
||||||
|
)
|
||||||
|
|
||||||
|
# Emit the aliased event (if any).
|
||||||
|
self._emit_aliased_event(event, "database_created")
|
||||||
|
|
||||||
|
# To avoid unnecessary application restarts do not trigger
|
||||||
|
# “endpoints_changed“ event if “database_created“ is triggered.
|
||||||
|
return
|
||||||
|
|
||||||
|
# Emit an endpoints changed event if the database
|
||||||
|
# added or changed this info in the relation databag.
|
||||||
|
if "endpoints" in diff.added or "endpoints" in diff.changed:
|
||||||
|
# Emit the default event (the one without an alias).
|
||||||
|
logger.info("endpoints changed on %s", datetime.now())
|
||||||
|
getattr(self.on, "endpoints_changed").emit(
|
||||||
|
event.relation, app=event.app, unit=event.unit
|
||||||
|
)
|
||||||
|
|
||||||
|
# Emit the aliased event (if any).
|
||||||
|
self._emit_aliased_event(event, "endpoints_changed")
|
||||||
|
|
||||||
|
# To avoid unnecessary application restarts do not trigger
|
||||||
|
# “read_only_endpoints_changed“ event if “endpoints_changed“ is triggered.
|
||||||
|
return
|
||||||
|
|
||||||
|
# Emit a read only endpoints changed event if the database
|
||||||
|
# added or changed this info in the relation databag.
|
||||||
|
if "read-only-endpoints" in diff.added or "read-only-endpoints" in diff.changed:
|
||||||
|
# Emit the default event (the one without an alias).
|
||||||
|
logger.info("read-only-endpoints changed on %s", datetime.now())
|
||||||
|
getattr(self.on, "read_only_endpoints_changed").emit(
|
||||||
|
event.relation, app=event.app, unit=event.unit
|
||||||
|
)
|
||||||
|
|
||||||
|
# Emit the aliased event (if any).
|
||||||
|
self._emit_aliased_event(event, "read_only_endpoints_changed")
|
||||||
|
|
||||||
|
@property
|
||||||
|
def relations(self) -> List[Relation]:
|
||||||
|
"""The list of Relation instances associated with this relation_name."""
|
||||||
|
return list(self.charm.model.relations[self.relation_name])
|
525
lib/charms/keystone_k8s/v1/identity_service.py
Normal file
525
lib/charms/keystone_k8s/v1/identity_service.py
Normal file
@@ -0,0 +1,525 @@
|
|||||||
|
"""IdentityServiceProvides and Requires module.
|
||||||
|
|
||||||
|
|
||||||
|
This library contains the Requires and Provides classes for handling
|
||||||
|
the identity_service interface.
|
||||||
|
|
||||||
|
Import `IdentityServiceRequires` in your charm, with the charm object and the
|
||||||
|
relation name:
|
||||||
|
- self
|
||||||
|
- "identity_service"
|
||||||
|
|
||||||
|
Also provide additional parameters to the charm object:
|
||||||
|
- service
|
||||||
|
- internal_url
|
||||||
|
- public_url
|
||||||
|
- admin_url
|
||||||
|
- region
|
||||||
|
- username
|
||||||
|
- vhost
|
||||||
|
|
||||||
|
Two events are also available to respond to:
|
||||||
|
- connected
|
||||||
|
- ready
|
||||||
|
- goneaway
|
||||||
|
|
||||||
|
A basic example showing the usage of this relation follows:
|
||||||
|
|
||||||
|
```
|
||||||
|
from charms.keystone_k8s.v1.identity_service import IdentityServiceRequires
|
||||||
|
|
||||||
|
class IdentityServiceClientCharm(CharmBase):
|
||||||
|
def __init__(self, *args):
|
||||||
|
super().__init__(*args)
|
||||||
|
# IdentityService Requires
|
||||||
|
self.identity_service = IdentityServiceRequires(
|
||||||
|
self, "identity_service",
|
||||||
|
service = "my-service"
|
||||||
|
internal_url = "http://internal-url"
|
||||||
|
public_url = "http://public-url"
|
||||||
|
admin_url = "http://admin-url"
|
||||||
|
region = "region"
|
||||||
|
)
|
||||||
|
self.framework.observe(
|
||||||
|
self.identity_service.on.connected, self._on_identity_service_connected)
|
||||||
|
self.framework.observe(
|
||||||
|
self.identity_service.on.ready, self._on_identity_service_ready)
|
||||||
|
self.framework.observe(
|
||||||
|
self.identity_service.on.goneaway, self._on_identity_service_goneaway)
|
||||||
|
|
||||||
|
def _on_identity_service_connected(self, event):
|
||||||
|
'''React to the IdentityService connected event.
|
||||||
|
|
||||||
|
This event happens when n IdentityService relation is added to the
|
||||||
|
model before credentials etc have been provided.
|
||||||
|
'''
|
||||||
|
# Do something before the relation is complete
|
||||||
|
pass
|
||||||
|
|
||||||
|
def _on_identity_service_ready(self, event):
|
||||||
|
'''React to the IdentityService ready event.
|
||||||
|
|
||||||
|
The IdentityService interface will use the provided config for the
|
||||||
|
request to the identity server.
|
||||||
|
'''
|
||||||
|
# IdentityService Relation is ready. Do something with the completed relation.
|
||||||
|
pass
|
||||||
|
|
||||||
|
def _on_identity_service_goneaway(self, event):
|
||||||
|
'''React to the IdentityService goneaway event.
|
||||||
|
|
||||||
|
This event happens when an IdentityService relation is removed.
|
||||||
|
'''
|
||||||
|
# IdentityService Relation has goneaway. shutdown services or suchlike
|
||||||
|
pass
|
||||||
|
```
|
||||||
|
"""
|
||||||
|
|
||||||
|
import json
|
||||||
|
import logging
|
||||||
|
|
||||||
|
from ops.framework import (
|
||||||
|
StoredState,
|
||||||
|
EventBase,
|
||||||
|
ObjectEvents,
|
||||||
|
EventSource,
|
||||||
|
Object,
|
||||||
|
)
|
||||||
|
from ops.model import (
|
||||||
|
Relation,
|
||||||
|
SecretNotFoundError,
|
||||||
|
)
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
# The unique Charmhub library identifier, never change it
|
||||||
|
LIBID = "0fa7fe7236c14c6e9624acf232b9a3b0"
|
||||||
|
|
||||||
|
# Increment this major API version when introducing breaking changes
|
||||||
|
LIBAPI = 1
|
||||||
|
|
||||||
|
# Increment this PATCH version before using `charmcraft publish-lib` or reset
|
||||||
|
# to 0 if you are raising the major API version
|
||||||
|
LIBPATCH = 1
|
||||||
|
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class IdentityServiceConnectedEvent(EventBase):
|
||||||
|
"""IdentityService connected Event."""
|
||||||
|
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class IdentityServiceReadyEvent(EventBase):
|
||||||
|
"""IdentityService ready for use Event."""
|
||||||
|
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class IdentityServiceGoneAwayEvent(EventBase):
|
||||||
|
"""IdentityService relation has gone-away Event"""
|
||||||
|
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class IdentityServiceServerEvents(ObjectEvents):
|
||||||
|
"""Events class for `on`"""
|
||||||
|
|
||||||
|
connected = EventSource(IdentityServiceConnectedEvent)
|
||||||
|
ready = EventSource(IdentityServiceReadyEvent)
|
||||||
|
goneaway = EventSource(IdentityServiceGoneAwayEvent)
|
||||||
|
|
||||||
|
|
||||||
|
class IdentityServiceRequires(Object):
|
||||||
|
"""
|
||||||
|
IdentityServiceRequires class
|
||||||
|
"""
|
||||||
|
|
||||||
|
on = IdentityServiceServerEvents()
|
||||||
|
_stored = StoredState()
|
||||||
|
|
||||||
|
def __init__(self, charm, relation_name: str, service_endpoints: dict,
|
||||||
|
region: str):
|
||||||
|
super().__init__(charm, relation_name)
|
||||||
|
self.charm = charm
|
||||||
|
self.relation_name = relation_name
|
||||||
|
self.service_endpoints = service_endpoints
|
||||||
|
self.region = region
|
||||||
|
self.framework.observe(
|
||||||
|
self.charm.on[relation_name].relation_joined,
|
||||||
|
self._on_identity_service_relation_joined,
|
||||||
|
)
|
||||||
|
self.framework.observe(
|
||||||
|
self.charm.on[relation_name].relation_changed,
|
||||||
|
self._on_identity_service_relation_changed,
|
||||||
|
)
|
||||||
|
self.framework.observe(
|
||||||
|
self.charm.on[relation_name].relation_departed,
|
||||||
|
self._on_identity_service_relation_changed,
|
||||||
|
)
|
||||||
|
self.framework.observe(
|
||||||
|
self.charm.on[relation_name].relation_broken,
|
||||||
|
self._on_identity_service_relation_broken,
|
||||||
|
)
|
||||||
|
|
||||||
|
def _on_identity_service_relation_joined(self, event):
|
||||||
|
"""IdentityService relation joined."""
|
||||||
|
logging.debug("IdentityService on_joined")
|
||||||
|
self.on.connected.emit()
|
||||||
|
self.register_services(
|
||||||
|
self.service_endpoints,
|
||||||
|
self.region)
|
||||||
|
|
||||||
|
def _on_identity_service_relation_changed(self, event):
|
||||||
|
"""IdentityService relation changed."""
|
||||||
|
logging.debug("IdentityService on_changed")
|
||||||
|
try:
|
||||||
|
self.service_password
|
||||||
|
self.on.ready.emit()
|
||||||
|
except (AttributeError, KeyError):
|
||||||
|
pass
|
||||||
|
|
||||||
|
def _on_identity_service_relation_broken(self, event):
|
||||||
|
"""IdentityService relation broken."""
|
||||||
|
logging.debug("IdentityService on_broken")
|
||||||
|
self.on.goneaway.emit()
|
||||||
|
|
||||||
|
@property
|
||||||
|
def _identity_service_rel(self) -> Relation:
|
||||||
|
"""The IdentityService relation."""
|
||||||
|
return self.framework.model.get_relation(self.relation_name)
|
||||||
|
|
||||||
|
def get_remote_app_data(self, key: str) -> str:
|
||||||
|
"""Return the value for the given key from remote app data."""
|
||||||
|
data = self._identity_service_rel.data[self._identity_service_rel.app]
|
||||||
|
return data.get(key)
|
||||||
|
|
||||||
|
@property
|
||||||
|
def api_version(self) -> str:
|
||||||
|
"""Return the api_version."""
|
||||||
|
return self.get_remote_app_data('api-version')
|
||||||
|
|
||||||
|
@property
|
||||||
|
def auth_host(self) -> str:
|
||||||
|
"""Return the auth_host."""
|
||||||
|
return self.get_remote_app_data('auth-host')
|
||||||
|
|
||||||
|
@property
|
||||||
|
def auth_port(self) -> str:
|
||||||
|
"""Return the auth_port."""
|
||||||
|
return self.get_remote_app_data('auth-port')
|
||||||
|
|
||||||
|
@property
|
||||||
|
def auth_protocol(self) -> str:
|
||||||
|
"""Return the auth_protocol."""
|
||||||
|
return self.get_remote_app_data('auth-protocol')
|
||||||
|
|
||||||
|
@property
|
||||||
|
def internal_host(self) -> str:
|
||||||
|
"""Return the internal_host."""
|
||||||
|
return self.get_remote_app_data('internal-host')
|
||||||
|
|
||||||
|
@property
|
||||||
|
def internal_port(self) -> str:
|
||||||
|
"""Return the internal_port."""
|
||||||
|
return self.get_remote_app_data('internal-port')
|
||||||
|
|
||||||
|
@property
|
||||||
|
def internal_protocol(self) -> str:
|
||||||
|
"""Return the internal_protocol."""
|
||||||
|
return self.get_remote_app_data('internal-protocol')
|
||||||
|
|
||||||
|
@property
|
||||||
|
def admin_domain_name(self) -> str:
|
||||||
|
"""Return the admin_domain_name."""
|
||||||
|
return self.get_remote_app_data('admin-domain-name')
|
||||||
|
|
||||||
|
@property
|
||||||
|
def admin_domain_id(self) -> str:
|
||||||
|
"""Return the admin_domain_id."""
|
||||||
|
return self.get_remote_app_data('admin-domain-id')
|
||||||
|
|
||||||
|
@property
|
||||||
|
def admin_project_name(self) -> str:
|
||||||
|
"""Return the admin_project_name."""
|
||||||
|
return self.get_remote_app_data('admin-project-name')
|
||||||
|
|
||||||
|
@property
|
||||||
|
def admin_project_id(self) -> str:
|
||||||
|
"""Return the admin_project_id."""
|
||||||
|
return self.get_remote_app_data('admin-project-id')
|
||||||
|
|
||||||
|
@property
|
||||||
|
def admin_user_name(self) -> str:
|
||||||
|
"""Return the admin_user_name."""
|
||||||
|
return self.get_remote_app_data('admin-user-name')
|
||||||
|
|
||||||
|
@property
|
||||||
|
def admin_user_id(self) -> str:
|
||||||
|
"""Return the admin_user_id."""
|
||||||
|
return self.get_remote_app_data('admin-user-id')
|
||||||
|
|
||||||
|
@property
|
||||||
|
def service_domain_name(self) -> str:
|
||||||
|
"""Return the service_domain_name."""
|
||||||
|
return self.get_remote_app_data('service-domain-name')
|
||||||
|
|
||||||
|
@property
|
||||||
|
def service_domain_id(self) -> str:
|
||||||
|
"""Return the service_domain_id."""
|
||||||
|
return self.get_remote_app_data('service-domain-id')
|
||||||
|
|
||||||
|
@property
|
||||||
|
def service_host(self) -> str:
|
||||||
|
"""Return the service_host."""
|
||||||
|
return self.get_remote_app_data('service-host')
|
||||||
|
|
||||||
|
@property
|
||||||
|
def service_credentials(self) -> str:
|
||||||
|
"""Return the service_credentials secret."""
|
||||||
|
return self.get_remote_app_data('service-credentials')
|
||||||
|
|
||||||
|
@property
|
||||||
|
def service_password(self) -> str:
|
||||||
|
"""Return the service_password."""
|
||||||
|
credentials_id = self.get_remote_app_data('service-credentials')
|
||||||
|
if not credentials_id:
|
||||||
|
return None
|
||||||
|
|
||||||
|
try:
|
||||||
|
credentials = self.charm.model.get_secret(id=credentials_id)
|
||||||
|
return credentials.get_content().get("password")
|
||||||
|
except SecretNotFoundError:
|
||||||
|
logger.warning(f"Secret {credentials_id} not found")
|
||||||
|
return None
|
||||||
|
|
||||||
|
@property
|
||||||
|
def service_port(self) -> str:
|
||||||
|
"""Return the service_port."""
|
||||||
|
return self.get_remote_app_data('service-port')
|
||||||
|
|
||||||
|
@property
|
||||||
|
def service_protocol(self) -> str:
|
||||||
|
"""Return the service_protocol."""
|
||||||
|
return self.get_remote_app_data('service-protocol')
|
||||||
|
|
||||||
|
@property
|
||||||
|
def service_project_name(self) -> str:
|
||||||
|
"""Return the service_project_name."""
|
||||||
|
return self.get_remote_app_data('service-project-name')
|
||||||
|
|
||||||
|
@property
|
||||||
|
def service_project_id(self) -> str:
|
||||||
|
"""Return the service_project_id."""
|
||||||
|
return self.get_remote_app_data('service-project-id')
|
||||||
|
|
||||||
|
@property
|
||||||
|
def service_user_name(self) -> str:
|
||||||
|
"""Return the service_user_name."""
|
||||||
|
credentials_id = self.get_remote_app_data('service-credentials')
|
||||||
|
if not credentials_id:
|
||||||
|
return None
|
||||||
|
|
||||||
|
try:
|
||||||
|
credentials = self.charm.model.get_secret(id=credentials_id)
|
||||||
|
return credentials.get_content().get("username")
|
||||||
|
except SecretNotFoundError:
|
||||||
|
logger.warning(f"Secret {credentials_id} not found")
|
||||||
|
return None
|
||||||
|
|
||||||
|
@property
|
||||||
|
def service_user_id(self) -> str:
|
||||||
|
"""Return the service_user_id."""
|
||||||
|
return self.get_remote_app_data('service-user-id')
|
||||||
|
|
||||||
|
@property
|
||||||
|
def internal_auth_url(self) -> str:
|
||||||
|
"""Return the internal_auth_url."""
|
||||||
|
return self.get_remote_app_data('internal-auth-url')
|
||||||
|
|
||||||
|
@property
|
||||||
|
def admin_auth_url(self) -> str:
|
||||||
|
"""Return the admin_auth_url."""
|
||||||
|
return self.get_remote_app_data('admin-auth-url')
|
||||||
|
|
||||||
|
@property
|
||||||
|
def public_auth_url(self) -> str:
|
||||||
|
"""Return the public_auth_url."""
|
||||||
|
return self.get_remote_app_data('public-auth-url')
|
||||||
|
|
||||||
|
@property
|
||||||
|
def admin_role(self) -> str:
|
||||||
|
"""Return the admin_role."""
|
||||||
|
return self.get_remote_app_data('admin-role')
|
||||||
|
|
||||||
|
def register_services(self, service_endpoints: dict,
|
||||||
|
region: str) -> None:
|
||||||
|
"""Request access to the IdentityService server."""
|
||||||
|
if self.model.unit.is_leader():
|
||||||
|
logging.debug("Requesting service registration")
|
||||||
|
app_data = self._identity_service_rel.data[self.charm.app]
|
||||||
|
app_data["service-endpoints"] = json.dumps(
|
||||||
|
service_endpoints, sort_keys=True
|
||||||
|
)
|
||||||
|
app_data["region"] = region
|
||||||
|
|
||||||
|
|
||||||
|
class HasIdentityServiceClientsEvent(EventBase):
|
||||||
|
"""Has IdentityServiceClients Event."""
|
||||||
|
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class ReadyIdentityServiceClientsEvent(EventBase):
|
||||||
|
"""IdentityServiceClients Ready Event."""
|
||||||
|
|
||||||
|
def __init__(self, handle, relation_id, relation_name, service_endpoints,
|
||||||
|
region, client_app_name):
|
||||||
|
super().__init__(handle)
|
||||||
|
self.relation_id = relation_id
|
||||||
|
self.relation_name = relation_name
|
||||||
|
self.service_endpoints = service_endpoints
|
||||||
|
self.region = region
|
||||||
|
self.client_app_name = client_app_name
|
||||||
|
|
||||||
|
def snapshot(self):
|
||||||
|
return {
|
||||||
|
"relation_id": self.relation_id,
|
||||||
|
"relation_name": self.relation_name,
|
||||||
|
"service_endpoints": self.service_endpoints,
|
||||||
|
"client_app_name": self.client_app_name,
|
||||||
|
"region": self.region}
|
||||||
|
|
||||||
|
def restore(self, snapshot):
|
||||||
|
super().restore(snapshot)
|
||||||
|
self.relation_id = snapshot["relation_id"]
|
||||||
|
self.relation_name = snapshot["relation_name"]
|
||||||
|
self.service_endpoints = snapshot["service_endpoints"]
|
||||||
|
self.region = snapshot["region"]
|
||||||
|
self.client_app_name = snapshot["client_app_name"]
|
||||||
|
|
||||||
|
|
||||||
|
class IdentityServiceClientEvents(ObjectEvents):
|
||||||
|
"""Events class for `on`"""
|
||||||
|
|
||||||
|
has_identity_service_clients = EventSource(HasIdentityServiceClientsEvent)
|
||||||
|
ready_identity_service_clients = EventSource(ReadyIdentityServiceClientsEvent)
|
||||||
|
|
||||||
|
|
||||||
|
class IdentityServiceProvides(Object):
|
||||||
|
"""
|
||||||
|
IdentityServiceProvides class
|
||||||
|
"""
|
||||||
|
|
||||||
|
on = IdentityServiceClientEvents()
|
||||||
|
_stored = StoredState()
|
||||||
|
|
||||||
|
def __init__(self, charm, relation_name):
|
||||||
|
super().__init__(charm, relation_name)
|
||||||
|
self.charm = charm
|
||||||
|
self.relation_name = relation_name
|
||||||
|
self.framework.observe(
|
||||||
|
self.charm.on[relation_name].relation_joined,
|
||||||
|
self._on_identity_service_relation_joined,
|
||||||
|
)
|
||||||
|
self.framework.observe(
|
||||||
|
self.charm.on[relation_name].relation_changed,
|
||||||
|
self._on_identity_service_relation_changed,
|
||||||
|
)
|
||||||
|
self.framework.observe(
|
||||||
|
self.charm.on[relation_name].relation_broken,
|
||||||
|
self._on_identity_service_relation_broken,
|
||||||
|
)
|
||||||
|
|
||||||
|
def _on_identity_service_relation_joined(self, event):
|
||||||
|
"""Handle IdentityService joined."""
|
||||||
|
logging.debug("IdentityService on_joined")
|
||||||
|
self.on.has_identity_service_clients.emit()
|
||||||
|
|
||||||
|
def _on_identity_service_relation_changed(self, event):
|
||||||
|
"""Handle IdentityService changed."""
|
||||||
|
logging.debug("IdentityService on_changed")
|
||||||
|
REQUIRED_KEYS = [
|
||||||
|
'service-endpoints',
|
||||||
|
'region']
|
||||||
|
|
||||||
|
values = [
|
||||||
|
event.relation.data[event.relation.app].get(k)
|
||||||
|
for k in REQUIRED_KEYS
|
||||||
|
]
|
||||||
|
# Validate data on the relation
|
||||||
|
if all(values):
|
||||||
|
service_eps = json.loads(
|
||||||
|
event.relation.data[event.relation.app]['service-endpoints'])
|
||||||
|
self.on.ready_identity_service_clients.emit(
|
||||||
|
event.relation.id,
|
||||||
|
event.relation.name,
|
||||||
|
service_eps,
|
||||||
|
event.relation.data[event.relation.app]['region'],
|
||||||
|
event.relation.app.name)
|
||||||
|
|
||||||
|
def _on_identity_service_relation_broken(self, event):
|
||||||
|
"""Handle IdentityService broken."""
|
||||||
|
logging.debug("IdentityServiceProvides on_departed")
|
||||||
|
# TODO clear data on the relation
|
||||||
|
|
||||||
|
def set_identity_service_credentials(self, relation_name: int,
|
||||||
|
relation_id: str,
|
||||||
|
api_version: str,
|
||||||
|
auth_host: str,
|
||||||
|
auth_port: str,
|
||||||
|
auth_protocol: str,
|
||||||
|
internal_host: str,
|
||||||
|
internal_port: str,
|
||||||
|
internal_protocol: str,
|
||||||
|
service_host: str,
|
||||||
|
service_port: str,
|
||||||
|
service_protocol: str,
|
||||||
|
admin_domain: str,
|
||||||
|
admin_project: str,
|
||||||
|
admin_user: str,
|
||||||
|
service_domain: str,
|
||||||
|
service_project: str,
|
||||||
|
service_user: str,
|
||||||
|
internal_auth_url: str,
|
||||||
|
admin_auth_url: str,
|
||||||
|
public_auth_url: str,
|
||||||
|
service_credentials: str,
|
||||||
|
admin_role: str):
|
||||||
|
logging.debug("Setting identity_service connection information.")
|
||||||
|
_identity_service_rel = None
|
||||||
|
for relation in self.framework.model.relations[relation_name]:
|
||||||
|
if relation.id == relation_id:
|
||||||
|
_identity_service_rel = relation
|
||||||
|
if not _identity_service_rel:
|
||||||
|
# Relation has disappeared so skip send of data
|
||||||
|
return
|
||||||
|
app_data = _identity_service_rel.data[self.charm.app]
|
||||||
|
app_data["api-version"] = api_version
|
||||||
|
app_data["auth-host"] = auth_host
|
||||||
|
app_data["auth-port"] = str(auth_port)
|
||||||
|
app_data["auth-protocol"] = auth_protocol
|
||||||
|
app_data["internal-host"] = internal_host
|
||||||
|
app_data["internal-port"] = str(internal_port)
|
||||||
|
app_data["internal-protocol"] = internal_protocol
|
||||||
|
app_data["service-host"] = service_host
|
||||||
|
app_data["service-port"] = str(service_port)
|
||||||
|
app_data["service-protocol"] = service_protocol
|
||||||
|
app_data["admin-domain-name"] = admin_domain.name
|
||||||
|
app_data["admin-domain-id"] = admin_domain.id
|
||||||
|
app_data["admin-project-name"] = admin_project.name
|
||||||
|
app_data["admin-project-id"] = admin_project.id
|
||||||
|
app_data["admin-user-name"] = admin_user.name
|
||||||
|
app_data["admin-user-id"] = admin_user.id
|
||||||
|
app_data["service-domain-name"] = service_domain.name
|
||||||
|
app_data["service-domain-id"] = service_domain.id
|
||||||
|
app_data["service-project-name"] = service_project.name
|
||||||
|
app_data["service-project-id"] = service_project.id
|
||||||
|
app_data["service-user-id"] = service_user.id
|
||||||
|
app_data["internal-auth-url"] = internal_auth_url
|
||||||
|
app_data["admin-auth-url"] = admin_auth_url
|
||||||
|
app_data["public-auth-url"] = public_auth_url
|
||||||
|
app_data["service-credentials"] = service_credentials
|
||||||
|
app_data["admin-role"] = admin_role
|
286
lib/charms/rabbitmq_k8s/v0/rabbitmq.py
Normal file
286
lib/charms/rabbitmq_k8s/v0/rabbitmq.py
Normal file
@@ -0,0 +1,286 @@
|
|||||||
|
"""RabbitMQProvides and Requires module.
|
||||||
|
|
||||||
|
This library contains the Requires and Provides classes for handling
|
||||||
|
the rabbitmq interface.
|
||||||
|
|
||||||
|
Import `RabbitMQRequires` in your charm, with the charm object and the
|
||||||
|
relation name:
|
||||||
|
- self
|
||||||
|
- "amqp"
|
||||||
|
|
||||||
|
Also provide two additional parameters to the charm object:
|
||||||
|
- username
|
||||||
|
- vhost
|
||||||
|
|
||||||
|
Two events are also available to respond to:
|
||||||
|
- connected
|
||||||
|
- ready
|
||||||
|
- goneaway
|
||||||
|
|
||||||
|
A basic example showing the usage of this relation follows:
|
||||||
|
|
||||||
|
```
|
||||||
|
from charms.rabbitmq_k8s.v0.rabbitmq import RabbitMQRequires
|
||||||
|
|
||||||
|
class RabbitMQClientCharm(CharmBase):
|
||||||
|
def __init__(self, *args):
|
||||||
|
super().__init__(*args)
|
||||||
|
# RabbitMQ Requires
|
||||||
|
self.amqp = RabbitMQRequires(
|
||||||
|
self, "amqp",
|
||||||
|
username="myusername",
|
||||||
|
vhost="vhostname"
|
||||||
|
)
|
||||||
|
self.framework.observe(
|
||||||
|
self.amqp.on.connected, self._on_amqp_connected)
|
||||||
|
self.framework.observe(
|
||||||
|
self.amqp.on.ready, self._on_amqp_ready)
|
||||||
|
self.framework.observe(
|
||||||
|
self.amqp.on.goneaway, self._on_amqp_goneaway)
|
||||||
|
|
||||||
|
def _on_amqp_connected(self, event):
|
||||||
|
'''React to the RabbitMQ connected event.
|
||||||
|
|
||||||
|
This event happens when n RabbitMQ relation is added to the
|
||||||
|
model before credentials etc have been provided.
|
||||||
|
'''
|
||||||
|
# Do something before the relation is complete
|
||||||
|
pass
|
||||||
|
|
||||||
|
def _on_amqp_ready(self, event):
|
||||||
|
'''React to the RabbitMQ ready event.
|
||||||
|
|
||||||
|
The RabbitMQ interface will use the provided username and vhost for the
|
||||||
|
request to the rabbitmq server.
|
||||||
|
'''
|
||||||
|
# RabbitMQ Relation is ready. Do something with the completed relation.
|
||||||
|
pass
|
||||||
|
|
||||||
|
def _on_amqp_goneaway(self, event):
|
||||||
|
'''React to the RabbitMQ goneaway event.
|
||||||
|
|
||||||
|
This event happens when an RabbitMQ relation is removed.
|
||||||
|
'''
|
||||||
|
# RabbitMQ Relation has goneaway. shutdown services or suchlike
|
||||||
|
pass
|
||||||
|
```
|
||||||
|
"""
|
||||||
|
|
||||||
|
# The unique Charmhub library identifier, never change it
|
||||||
|
LIBID = "45622352791142fd9cf87232e3bd6f2a"
|
||||||
|
|
||||||
|
# Increment this major API version when introducing breaking changes
|
||||||
|
LIBAPI = 0
|
||||||
|
|
||||||
|
# Increment this PATCH version before using `charmcraft publish-lib` or reset
|
||||||
|
# to 0 if you are raising the major API version
|
||||||
|
LIBPATCH = 1
|
||||||
|
|
||||||
|
import logging
|
||||||
|
|
||||||
|
from ops.framework import (
|
||||||
|
StoredState,
|
||||||
|
EventBase,
|
||||||
|
ObjectEvents,
|
||||||
|
EventSource,
|
||||||
|
Object,
|
||||||
|
)
|
||||||
|
|
||||||
|
from ops.model import Relation
|
||||||
|
|
||||||
|
from typing import List
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class RabbitMQConnectedEvent(EventBase):
|
||||||
|
"""RabbitMQ connected Event."""
|
||||||
|
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class RabbitMQReadyEvent(EventBase):
|
||||||
|
"""RabbitMQ ready for use Event."""
|
||||||
|
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class RabbitMQGoneAwayEvent(EventBase):
|
||||||
|
"""RabbitMQ relation has gone-away Event"""
|
||||||
|
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class RabbitMQServerEvents(ObjectEvents):
|
||||||
|
"""Events class for `on`"""
|
||||||
|
|
||||||
|
connected = EventSource(RabbitMQConnectedEvent)
|
||||||
|
ready = EventSource(RabbitMQReadyEvent)
|
||||||
|
goneaway = EventSource(RabbitMQGoneAwayEvent)
|
||||||
|
|
||||||
|
|
||||||
|
class RabbitMQRequires(Object):
|
||||||
|
"""
|
||||||
|
RabbitMQRequires class
|
||||||
|
"""
|
||||||
|
|
||||||
|
on = RabbitMQServerEvents()
|
||||||
|
|
||||||
|
def __init__(self, charm, relation_name: str, username: str, vhost: str):
|
||||||
|
super().__init__(charm, relation_name)
|
||||||
|
self.charm = charm
|
||||||
|
self.relation_name = relation_name
|
||||||
|
self.username = username
|
||||||
|
self.vhost = vhost
|
||||||
|
self.framework.observe(
|
||||||
|
self.charm.on[relation_name].relation_joined,
|
||||||
|
self._on_amqp_relation_joined,
|
||||||
|
)
|
||||||
|
self.framework.observe(
|
||||||
|
self.charm.on[relation_name].relation_changed,
|
||||||
|
self._on_amqp_relation_changed,
|
||||||
|
)
|
||||||
|
self.framework.observe(
|
||||||
|
self.charm.on[relation_name].relation_departed,
|
||||||
|
self._on_amqp_relation_changed,
|
||||||
|
)
|
||||||
|
self.framework.observe(
|
||||||
|
self.charm.on[relation_name].relation_broken,
|
||||||
|
self._on_amqp_relation_broken,
|
||||||
|
)
|
||||||
|
|
||||||
|
def _on_amqp_relation_joined(self, event):
|
||||||
|
"""RabbitMQ relation joined."""
|
||||||
|
logging.debug("RabbitMQRabbitMQRequires on_joined")
|
||||||
|
self.on.connected.emit()
|
||||||
|
self.request_access(self.username, self.vhost)
|
||||||
|
|
||||||
|
def _on_amqp_relation_changed(self, event):
|
||||||
|
"""RabbitMQ relation changed."""
|
||||||
|
logging.debug("RabbitMQRabbitMQRequires on_changed/departed")
|
||||||
|
if self.password:
|
||||||
|
self.on.ready.emit()
|
||||||
|
|
||||||
|
def _on_amqp_relation_broken(self, event):
|
||||||
|
"""RabbitMQ relation broken."""
|
||||||
|
logging.debug("RabbitMQRabbitMQRequires on_broken")
|
||||||
|
self.on.goneaway.emit()
|
||||||
|
|
||||||
|
@property
|
||||||
|
def _amqp_rel(self) -> Relation:
|
||||||
|
"""The RabbitMQ relation."""
|
||||||
|
return self.framework.model.get_relation(self.relation_name)
|
||||||
|
|
||||||
|
@property
|
||||||
|
def password(self) -> str:
|
||||||
|
"""Return the RabbitMQ password from the server side of the relation."""
|
||||||
|
return self._amqp_rel.data[self._amqp_rel.app].get("password")
|
||||||
|
|
||||||
|
@property
|
||||||
|
def hostname(self) -> str:
|
||||||
|
"""Return the hostname from the RabbitMQ relation"""
|
||||||
|
return self._amqp_rel.data[self._amqp_rel.app].get("hostname")
|
||||||
|
|
||||||
|
@property
|
||||||
|
def ssl_port(self) -> str:
|
||||||
|
"""Return the SSL port from the RabbitMQ relation"""
|
||||||
|
return self._amqp_rel.data[self._amqp_rel.app].get("ssl_port")
|
||||||
|
|
||||||
|
@property
|
||||||
|
def ssl_ca(self) -> str:
|
||||||
|
"""Return the SSL port from the RabbitMQ relation"""
|
||||||
|
return self._amqp_rel.data[self._amqp_rel.app].get("ssl_ca")
|
||||||
|
|
||||||
|
@property
def hostnames(self) -> List[str]:
    """Ingress addresses of all remote RabbitMQ units on the relation."""
    relation = self._amqp_rel
    return [
        relation.data[unit].get("ingress-address")
        for unit in relation.units
    ]
||||||
|
def request_access(self, username: str, vhost: str) -> None:
    """Publish the desired username and vhost to the RabbitMQ server.

    Only the leader unit may write to the application databag, so this
    is a no-op on non-leader units.
    """
    if not self.model.unit.is_leader():
        return
    logging.debug("Requesting RabbitMQ user and vhost")
    app_data = self._amqp_rel.data[self.charm.app]
    app_data["username"] = username
    app_data["vhost"] = vhost
|
|
||||||
|
class HasRabbitMQClientsEvent(EventBase):
    """Event signalling that a RabbitMQ client application has related."""
|
||||||
|
class ReadyRabbitMQClientsEvent(EventBase):
    """Event signalling that related RabbitMQ clients supplied their data."""
|
||||||
|
class RabbitMQClientEvents(ObjectEvents):
    """Events emitted by the provider side (exposed as its ``on``)."""

    # Fired on relation-joined: a client application has related.
    has_amqp_clients = EventSource(HasRabbitMQClientsEvent)
    # Fired on relation-changed once the client published username+vhost.
    ready_amqp_clients = EventSource(ReadyRabbitMQClientsEvent)
||||||
|
class RabbitMQProvides(Object):
    """Provider side of the RabbitMQ interface.

    Observes the relation endpoint named ``relation_name`` and, on the
    leader unit, invokes ``callback(event, username, vhost)`` once a
    client has published both ``username`` and ``vhost``.
    """

    on = RabbitMQClientEvents()

    def __init__(self, charm, relation_name, callback):
        """Observe lifecycle events for the given relation endpoint.

        Args:
            charm: the charm instantiating this interface.
            relation_name: name of the relation endpoint to bind to.
            callback: callable invoked (leader only) with
                ``(event, username, vhost)`` when a client request
                is complete.
        """
        super().__init__(charm, relation_name)
        self.charm = charm
        self.relation_name = relation_name
        self.callback = callback
        self.framework.observe(
            self.charm.on[relation_name].relation_joined,
            self._on_amqp_relation_joined,
        )
        self.framework.observe(
            self.charm.on[relation_name].relation_changed,
            self._on_amqp_relation_changed,
        )
        self.framework.observe(
            self.charm.on[relation_name].relation_broken,
            self._on_amqp_relation_broken,
        )

    def _on_amqp_relation_joined(self, event):
        """Handle relation-joined: announce that a client is present."""
        # Fixed log prefix: previously "RabbitMQRabbitMQProvides", a
        # doubled-name leftover from a mechanical AMQP->RabbitMQ rename.
        logging.debug(
            "RabbitMQProvides on_joined data={}".format(
                event.relation.data[event.relation.app]
            )
        )
        self.on.has_amqp_clients.emit()

    def _on_amqp_relation_changed(self, event):
        """Handle relation-changed: validate client data and notify."""
        logging.debug(
            "RabbitMQProvides on_changed data={}".format(
                event.relation.data[event.relation.app]
            )
        )
        # Validate data on the relation; read each key once instead of
        # re-fetching it per use as the original code did.
        username = self.username(event)
        vhost = self.vhost(event)
        if username and vhost:
            self.on.ready_amqp_clients.emit()
            # Only the leader may act on the client's request.
            if self.charm.unit.is_leader():
                self.callback(event, username, vhost)
        else:
            logging.warning(
                "Received RabbitMQ changed event without the expected "
                "keys ('username', 'vhost') in the application data "
                "bag. Incompatible charm in other end of relation?"
            )

    def _on_amqp_relation_broken(self, event):
        """Handle relation-broken."""
        # Fixed log message: it said "on_departed" although this handler
        # observes relation-broken.
        logging.debug("RabbitMQProvides on_broken")
        # TODO clear data on the relation

    def username(self, event):
        """Return the username requested by the client application."""
        return event.relation.data[event.relation.app].get("username")

    def vhost(self, event):
        """Return the vhost requested by the client application."""
        return event.relation.data[event.relation.app].get("vhost")
|
579
lib/charms/traefik_k8s/v1/ingress.py
Normal file
579
lib/charms/traefik_k8s/v1/ingress.py
Normal file
@@ -0,0 +1,579 @@
|
|||||||
|
# Copyright 2022 Canonical Ltd.
|
||||||
|
# See LICENSE file for licensing details.
|
||||||
|
|
||||||
|
r"""# Interface Library for ingress.
|
||||||
|
|
||||||
|
This library wraps relation endpoints using the `ingress` interface
|
||||||
|
and provides a Python API for both requesting and providing per-application
|
||||||
|
ingress, with load-balancing occurring across all units.
|
||||||
|
|
||||||
|
## Getting Started
|
||||||
|
|
||||||
|
To get started using the library, you just need to fetch the library using `charmcraft`.
|
||||||
|
|
||||||
|
```shell
|
||||||
|
cd some-charm
|
||||||
|
charmcraft fetch-lib charms.traefik_k8s.v1.ingress
|
||||||
|
```
|
||||||
|
|
||||||
|
In the `metadata.yaml` of the charm, add the following:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
requires:
|
||||||
|
ingress:
|
||||||
|
interface: ingress
|
||||||
|
limit: 1
|
||||||
|
```
|
||||||
|
|
||||||
|
Then, to initialise the library:
|
||||||
|
|
||||||
|
```python
|
||||||
|
from charms.traefik_k8s.v1.ingress import (IngressPerAppRequirer,
|
||||||
|
IngressPerAppReadyEvent, IngressPerAppRevokedEvent)
|
||||||
|
|
||||||
|
class SomeCharm(CharmBase):
|
||||||
|
def __init__(self, *args):
|
||||||
|
# ...
|
||||||
|
self.ingress = IngressPerAppRequirer(self, port=80)
|
||||||
|
# The following event is triggered when the ingress URL to be used
|
||||||
|
# by this deployment of the `SomeCharm` is ready (or changes).
|
||||||
|
self.framework.observe(
|
||||||
|
self.ingress.on.ready, self._on_ingress_ready
|
||||||
|
)
|
||||||
|
self.framework.observe(
|
||||||
|
self.ingress.on.revoked, self._on_ingress_revoked
|
||||||
|
)
|
||||||
|
|
||||||
|
def _on_ingress_ready(self, event: IngressPerAppReadyEvent):
|
||||||
|
logger.info("This app's ingress URL: %s", event.url)
|
||||||
|
|
||||||
|
def _on_ingress_revoked(self, event: IngressPerAppRevokedEvent):
|
||||||
|
logger.info("This app no longer has ingress")
|
||||||
|
"""
|
||||||
|
|
||||||
|
import logging
|
||||||
|
import socket
|
||||||
|
import typing
|
||||||
|
from typing import Any, Dict, Optional, Tuple, Union
|
||||||
|
|
||||||
|
import yaml
|
||||||
|
from ops.charm import CharmBase, RelationBrokenEvent, RelationEvent
|
||||||
|
from ops.framework import EventSource, Object, ObjectEvents, StoredState
|
||||||
|
from ops.model import ModelError, Relation
|
||||||
|
|
||||||
|
# The unique Charmhub library identifier, never change it
|
||||||
|
LIBID = "e6de2a5cd5b34422a204668f3b8f90d2"
|
||||||
|
|
||||||
|
# Increment this major API version when introducing breaking changes
|
||||||
|
LIBAPI = 1
|
||||||
|
|
||||||
|
# Increment this PATCH version before using `charmcraft publish-lib` or reset
|
||||||
|
# to 0 if you are raising the major API version
|
||||||
|
LIBPATCH = 15
|
||||||
|
|
||||||
|
DEFAULT_RELATION_NAME = "ingress"
|
||||||
|
RELATION_INTERFACE = "ingress"
|
||||||
|
|
||||||
|
log = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
try:
|
||||||
|
import jsonschema
|
||||||
|
|
||||||
|
DO_VALIDATION = True
|
||||||
|
except ModuleNotFoundError:
|
||||||
|
log.warning(
|
||||||
|
"The `ingress` library needs the `jsonschema` package to be able "
|
||||||
|
"to do runtime data validation; without it, it will still work but validation "
|
||||||
|
"will be disabled. \n"
|
||||||
|
"It is recommended to add `jsonschema` to the 'requirements.txt' of your charm, "
|
||||||
|
"which will enable this feature."
|
||||||
|
)
|
||||||
|
DO_VALIDATION = False
|
||||||
|
|
||||||
|
INGRESS_REQUIRES_APP_SCHEMA = {
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"model": {"type": "string"},
|
||||||
|
"name": {"type": "string"},
|
||||||
|
"host": {"type": "string"},
|
||||||
|
"port": {"type": "string"},
|
||||||
|
"strip-prefix": {"type": "string"},
|
||||||
|
"redirect-https": {"type": "string"},
|
||||||
|
},
|
||||||
|
"required": ["model", "name", "host", "port"],
|
||||||
|
}
|
||||||
|
|
||||||
|
INGRESS_PROVIDES_APP_SCHEMA = {
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"ingress": {"type": "object", "properties": {"url": {"type": "string"}}},
|
||||||
|
},
|
||||||
|
"required": ["ingress"],
|
||||||
|
}
|
||||||
|
|
||||||
|
try:
|
||||||
|
from typing import TypedDict
|
||||||
|
except ImportError:
|
||||||
|
from typing_extensions import TypedDict # py35 compatibility
|
||||||
|
|
||||||
|
# Model of the data a unit implementing the requirer will need to provide.
|
||||||
|
RequirerData = TypedDict(
|
||||||
|
"RequirerData",
|
||||||
|
{
|
||||||
|
"model": str,
|
||||||
|
"name": str,
|
||||||
|
"host": str,
|
||||||
|
"port": int,
|
||||||
|
"strip-prefix": bool,
|
||||||
|
"redirect-https": bool,
|
||||||
|
},
|
||||||
|
total=False,
|
||||||
|
)
|
||||||
|
# Provider ingress data model.
|
||||||
|
ProviderIngressData = TypedDict("ProviderIngressData", {"url": str})
|
||||||
|
# Provider application databag model.
|
||||||
|
ProviderApplicationData = TypedDict("ProviderApplicationData", {"ingress": ProviderIngressData}) # type: ignore
|
||||||
|
|
||||||
|
|
||||||
|
def _validate_data(data, schema):
|
||||||
|
"""Checks whether `data` matches `schema`.
|
||||||
|
|
||||||
|
Will raise DataValidationError if the data is not valid, else return None.
|
||||||
|
"""
|
||||||
|
if not DO_VALIDATION:
|
||||||
|
return
|
||||||
|
try:
|
||||||
|
jsonschema.validate(instance=data, schema=schema)
|
||||||
|
except jsonschema.ValidationError as e:
|
||||||
|
raise DataValidationError(data, schema) from e
|
||||||
|
|
||||||
|
|
||||||
|
class DataValidationError(RuntimeError):
|
||||||
|
"""Raised when data validation fails on IPU relation data."""
|
||||||
|
|
||||||
|
|
||||||
|
class _IngressPerAppBase(Object):
|
||||||
|
"""Base class for IngressPerUnit interface classes."""
|
||||||
|
|
||||||
|
def __init__(self, charm: CharmBase, relation_name: str = DEFAULT_RELATION_NAME):
|
||||||
|
super().__init__(charm, relation_name + "_V1")
|
||||||
|
|
||||||
|
self.charm: CharmBase = charm
|
||||||
|
self.relation_name = relation_name
|
||||||
|
self.app = self.charm.app
|
||||||
|
self.unit = self.charm.unit
|
||||||
|
|
||||||
|
observe = self.framework.observe
|
||||||
|
rel_events = charm.on[relation_name]
|
||||||
|
observe(rel_events.relation_created, self._handle_relation)
|
||||||
|
observe(rel_events.relation_joined, self._handle_relation)
|
||||||
|
observe(rel_events.relation_changed, self._handle_relation)
|
||||||
|
observe(rel_events.relation_broken, self._handle_relation_broken)
|
||||||
|
observe(charm.on.leader_elected, self._handle_upgrade_or_leader) # type: ignore
|
||||||
|
observe(charm.on.upgrade_charm, self._handle_upgrade_or_leader) # type: ignore
|
||||||
|
|
||||||
|
@property
|
||||||
|
def relations(self):
|
||||||
|
"""The list of Relation instances associated with this endpoint."""
|
||||||
|
return list(self.charm.model.relations[self.relation_name])
|
||||||
|
|
||||||
|
def _handle_relation(self, event):
|
||||||
|
"""Subclasses should implement this method to handle a relation update."""
|
||||||
|
pass
|
||||||
|
|
||||||
|
def _handle_relation_broken(self, event):
|
||||||
|
"""Subclasses should implement this method to handle a relation breaking."""
|
||||||
|
pass
|
||||||
|
|
||||||
|
def _handle_upgrade_or_leader(self, event):
|
||||||
|
"""Subclasses should implement this method to handle upgrades or leadership change."""
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class _IPAEvent(RelationEvent):
|
||||||
|
__args__: Tuple[str, ...] = ()
|
||||||
|
__optional_kwargs__: Dict[str, Any] = {}
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def __attrs__(cls):
|
||||||
|
return cls.__args__ + tuple(cls.__optional_kwargs__.keys())
|
||||||
|
|
||||||
|
def __init__(self, handle, relation, *args, **kwargs):
|
||||||
|
super().__init__(handle, relation)
|
||||||
|
|
||||||
|
if not len(self.__args__) == len(args):
|
||||||
|
raise TypeError("expected {} args, got {}".format(len(self.__args__), len(args)))
|
||||||
|
|
||||||
|
for attr, obj in zip(self.__args__, args):
|
||||||
|
setattr(self, attr, obj)
|
||||||
|
for attr, default in self.__optional_kwargs__.items():
|
||||||
|
obj = kwargs.get(attr, default)
|
||||||
|
setattr(self, attr, obj)
|
||||||
|
|
||||||
|
def snapshot(self):
|
||||||
|
dct = super().snapshot()
|
||||||
|
for attr in self.__attrs__():
|
||||||
|
obj = getattr(self, attr)
|
||||||
|
try:
|
||||||
|
dct[attr] = obj
|
||||||
|
except ValueError as e:
|
||||||
|
raise ValueError(
|
||||||
|
"cannot automagically serialize {}: "
|
||||||
|
"override this method and do it "
|
||||||
|
"manually.".format(obj)
|
||||||
|
) from e
|
||||||
|
|
||||||
|
return dct
|
||||||
|
|
||||||
|
def restore(self, snapshot) -> None:
|
||||||
|
super().restore(snapshot)
|
||||||
|
for attr, obj in snapshot.items():
|
||||||
|
setattr(self, attr, obj)
|
||||||
|
|
||||||
|
|
||||||
|
class IngressPerAppDataProvidedEvent(_IPAEvent):
|
||||||
|
"""Event representing that ingress data has been provided for an app."""
|
||||||
|
|
||||||
|
__args__ = ("name", "model", "port", "host", "strip_prefix", "redirect_https")
|
||||||
|
|
||||||
|
if typing.TYPE_CHECKING:
|
||||||
|
name: Optional[str] = None
|
||||||
|
model: Optional[str] = None
|
||||||
|
port: Optional[str] = None
|
||||||
|
host: Optional[str] = None
|
||||||
|
strip_prefix: bool = False
|
||||||
|
redirect_https: bool = False
|
||||||
|
|
||||||
|
|
||||||
|
class IngressPerAppDataRemovedEvent(RelationEvent):
|
||||||
|
"""Event representing that ingress data has been removed for an app."""
|
||||||
|
|
||||||
|
|
||||||
|
class IngressPerAppProviderEvents(ObjectEvents):
|
||||||
|
"""Container for IPA Provider events."""
|
||||||
|
|
||||||
|
data_provided = EventSource(IngressPerAppDataProvidedEvent)
|
||||||
|
data_removed = EventSource(IngressPerAppDataRemovedEvent)
|
||||||
|
|
||||||
|
|
||||||
|
class IngressPerAppProvider(_IngressPerAppBase):
|
||||||
|
"""Implementation of the provider of ingress."""
|
||||||
|
|
||||||
|
on = IngressPerAppProviderEvents() # type: ignore
|
||||||
|
|
||||||
|
def __init__(self, charm: CharmBase, relation_name: str = DEFAULT_RELATION_NAME):
|
||||||
|
"""Constructor for IngressPerAppProvider.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
charm: The charm that is instantiating the instance.
|
||||||
|
relation_name: The name of the relation endpoint to bind to
|
||||||
|
(defaults to "ingress").
|
||||||
|
"""
|
||||||
|
super().__init__(charm, relation_name)
|
||||||
|
|
||||||
|
def _handle_relation(self, event):
|
||||||
|
# created, joined or changed: if remote side has sent the required data:
|
||||||
|
# notify listeners.
|
||||||
|
if self.is_ready(event.relation):
|
||||||
|
data = self._get_requirer_data(event.relation)
|
||||||
|
self.on.data_provided.emit( # type: ignore
|
||||||
|
event.relation,
|
||||||
|
data["name"],
|
||||||
|
data["model"],
|
||||||
|
data["port"],
|
||||||
|
data["host"],
|
||||||
|
data.get("strip-prefix", False),
|
||||||
|
data.get("redirect-https", False),
|
||||||
|
)
|
||||||
|
|
||||||
|
def _handle_relation_broken(self, event):
|
||||||
|
self.on.data_removed.emit(event.relation) # type: ignore
|
||||||
|
|
||||||
|
def wipe_ingress_data(self, relation: Relation):
|
||||||
|
"""Clear ingress data from relation."""
|
||||||
|
assert self.unit.is_leader(), "only leaders can do this"
|
||||||
|
try:
|
||||||
|
relation.data
|
||||||
|
except ModelError as e:
|
||||||
|
log.warning(
|
||||||
|
"error {} accessing relation data for {!r}. "
|
||||||
|
"Probably a ghost of a dead relation is still "
|
||||||
|
"lingering around.".format(e, relation.name)
|
||||||
|
)
|
||||||
|
return
|
||||||
|
del relation.data[self.app]["ingress"]
|
||||||
|
|
||||||
|
def _get_requirer_data(self, relation: Relation) -> RequirerData: # type: ignore
|
||||||
|
"""Fetch and validate the requirer's app databag.
|
||||||
|
|
||||||
|
For convenience, we convert 'port' to integer.
|
||||||
|
"""
|
||||||
|
if not relation.app or not relation.app.name: # type: ignore
|
||||||
|
# Handle edge case where remote app name can be missing, e.g.,
|
||||||
|
# relation_broken events.
|
||||||
|
# FIXME https://github.com/canonical/traefik-k8s-operator/issues/34
|
||||||
|
return {}
|
||||||
|
|
||||||
|
databag = relation.data[relation.app]
|
||||||
|
remote_data: Dict[str, Union[int, str]] = {}
|
||||||
|
for k in ("port", "host", "model", "name", "mode", "strip-prefix", "redirect-https"):
|
||||||
|
v = databag.get(k)
|
||||||
|
if v is not None:
|
||||||
|
remote_data[k] = v
|
||||||
|
_validate_data(remote_data, INGRESS_REQUIRES_APP_SCHEMA)
|
||||||
|
remote_data["port"] = int(remote_data["port"])
|
||||||
|
remote_data["strip-prefix"] = bool(remote_data.get("strip-prefix", "false") == "true")
|
||||||
|
remote_data["redirect-https"] = bool(remote_data.get("redirect-https", "false") == "true")
|
||||||
|
return typing.cast(RequirerData, remote_data)
|
||||||
|
|
||||||
|
def get_data(self, relation: Relation) -> RequirerData: # type: ignore
|
||||||
|
"""Fetch the remote app's databag, i.e. the requirer data."""
|
||||||
|
return self._get_requirer_data(relation)
|
||||||
|
|
||||||
|
def is_ready(self, relation: Optional[Relation] = None):
|
||||||
|
"""The Provider is ready if the requirer has sent valid data."""
|
||||||
|
if not relation:
|
||||||
|
return any(map(self.is_ready, self.relations))
|
||||||
|
|
||||||
|
try:
|
||||||
|
return bool(self._get_requirer_data(relation))
|
||||||
|
except DataValidationError as e:
|
||||||
|
log.warning("Requirer not ready; validation error encountered: %s" % str(e))
|
||||||
|
return False
|
||||||
|
|
||||||
|
def _provided_url(self, relation: Relation) -> ProviderIngressData: # type: ignore
|
||||||
|
"""Fetch and validate this app databag; return the ingress url."""
|
||||||
|
if not relation.app or not relation.app.name or not self.unit.is_leader(): # type: ignore
|
||||||
|
# Handle edge case where remote app name can be missing, e.g.,
|
||||||
|
# relation_broken events.
|
||||||
|
# Also, only leader units can read own app databags.
|
||||||
|
# FIXME https://github.com/canonical/traefik-k8s-operator/issues/34
|
||||||
|
return typing.cast(ProviderIngressData, {}) # noqa
|
||||||
|
|
||||||
|
# fetch the provider's app databag
|
||||||
|
raw_data = relation.data[self.app].get("ingress")
|
||||||
|
if not raw_data:
|
||||||
|
raise RuntimeError("This application did not `publish_url` yet.")
|
||||||
|
|
||||||
|
ingress: ProviderIngressData = yaml.safe_load(raw_data)
|
||||||
|
_validate_data({"ingress": ingress}, INGRESS_PROVIDES_APP_SCHEMA)
|
||||||
|
return ingress
|
||||||
|
|
||||||
|
def publish_url(self, relation: Relation, url: str):
|
||||||
|
"""Publish to the app databag the ingress url."""
|
||||||
|
ingress = {"url": url}
|
||||||
|
ingress_data = {"ingress": ingress}
|
||||||
|
_validate_data(ingress_data, INGRESS_PROVIDES_APP_SCHEMA)
|
||||||
|
relation.data[self.app]["ingress"] = yaml.safe_dump(ingress)
|
||||||
|
|
||||||
|
@property
|
||||||
|
def proxied_endpoints(self):
|
||||||
|
"""Returns the ingress settings provided to applications by this IngressPerAppProvider.
|
||||||
|
|
||||||
|
For example, when this IngressPerAppProvider has provided the
|
||||||
|
`http://foo.bar/my-model.my-app` URL to the my-app application, the returned dictionary
|
||||||
|
will be:
|
||||||
|
|
||||||
|
```
|
||||||
|
{
|
||||||
|
"my-app": {
|
||||||
|
"url": "http://foo.bar/my-model.my-app"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
"""
|
||||||
|
results = {}
|
||||||
|
|
||||||
|
for ingress_relation in self.relations:
|
||||||
|
assert (
|
||||||
|
ingress_relation.app
|
||||||
|
), "no app in relation (shouldn't happen)" # for type checker
|
||||||
|
results[ingress_relation.app.name] = self._provided_url(ingress_relation)
|
||||||
|
|
||||||
|
return results
|
||||||
|
|
||||||
|
|
||||||
|
class IngressPerAppReadyEvent(_IPAEvent):
|
||||||
|
"""Event representing that ingress for an app is ready."""
|
||||||
|
|
||||||
|
__args__ = ("url",)
|
||||||
|
if typing.TYPE_CHECKING:
|
||||||
|
url: Optional[str] = None
|
||||||
|
|
||||||
|
|
||||||
|
class IngressPerAppRevokedEvent(RelationEvent):
|
||||||
|
"""Event representing that ingress for an app has been revoked."""
|
||||||
|
|
||||||
|
|
||||||
|
class IngressPerAppRequirerEvents(ObjectEvents):
|
||||||
|
"""Container for IPA Requirer events."""
|
||||||
|
|
||||||
|
ready = EventSource(IngressPerAppReadyEvent)
|
||||||
|
revoked = EventSource(IngressPerAppRevokedEvent)
|
||||||
|
|
||||||
|
|
||||||
|
class IngressPerAppRequirer(_IngressPerAppBase):
|
||||||
|
"""Implementation of the requirer of the ingress relation."""
|
||||||
|
|
||||||
|
on = IngressPerAppRequirerEvents() # type: ignore
|
||||||
|
|
||||||
|
# used to prevent spurious urls to be sent out if the event we're currently
|
||||||
|
# handling is a relation-broken one.
|
||||||
|
_stored = StoredState()
|
||||||
|
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
charm: CharmBase,
|
||||||
|
relation_name: str = DEFAULT_RELATION_NAME,
|
||||||
|
*,
|
||||||
|
host: Optional[str] = None,
|
||||||
|
port: Optional[int] = None,
|
||||||
|
strip_prefix: bool = False,
|
||||||
|
redirect_https: bool = False,
|
||||||
|
):
|
||||||
|
"""Constructor for IngressRequirer.
|
||||||
|
|
||||||
|
The request args can be used to specify the ingress properties when the
|
||||||
|
instance is created. If any are set, at least `port` is required, and
|
||||||
|
they will be sent to the ingress provider as soon as it is available.
|
||||||
|
All request args must be given as keyword args.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
charm: the charm that is instantiating the library.
|
||||||
|
relation_name: the name of the relation endpoint to bind to (defaults to `ingress`);
|
||||||
|
relation must be of interface type `ingress` and have "limit: 1")
|
||||||
|
host: Hostname to be used by the ingress provider to address the requiring
|
||||||
|
application; if unspecified, the default Kubernetes service name will be used.
|
||||||
|
strip_prefix: configure Traefik to strip the path prefix.
|
||||||
|
redirect_https: redirect incoming requests to the HTTPS.
|
||||||
|
|
||||||
|
Request Args:
|
||||||
|
port: the port of the service
|
||||||
|
"""
|
||||||
|
super().__init__(charm, relation_name)
|
||||||
|
self.charm: CharmBase = charm
|
||||||
|
self.relation_name = relation_name
|
||||||
|
self._strip_prefix = strip_prefix
|
||||||
|
self._redirect_https = redirect_https
|
||||||
|
|
||||||
|
self._stored.set_default(current_url=None) # type: ignore
|
||||||
|
|
||||||
|
# if instantiated with a port, and we are related, then
|
||||||
|
# we immediately publish our ingress data to speed up the process.
|
||||||
|
if port:
|
||||||
|
self._auto_data = host, port
|
||||||
|
else:
|
||||||
|
self._auto_data = None
|
||||||
|
|
||||||
|
def _handle_relation(self, event):
|
||||||
|
# created, joined or changed: if we have auto data: publish it
|
||||||
|
self._publish_auto_data(event.relation)
|
||||||
|
|
||||||
|
if self.is_ready():
|
||||||
|
# Avoid spurious events, emit only when there is a NEW URL available
|
||||||
|
new_url = (
|
||||||
|
None
|
||||||
|
if isinstance(event, RelationBrokenEvent)
|
||||||
|
else self._get_url_from_relation_data()
|
||||||
|
)
|
||||||
|
if self._stored.current_url != new_url: # type: ignore
|
||||||
|
self._stored.current_url = new_url # type: ignore
|
||||||
|
self.on.ready.emit(event.relation, new_url) # type: ignore
|
||||||
|
|
||||||
|
def _handle_relation_broken(self, event):
|
||||||
|
self._stored.current_url = None # type: ignore
|
||||||
|
self.on.revoked.emit(event.relation) # type: ignore
|
||||||
|
|
||||||
|
def _handle_upgrade_or_leader(self, event):
|
||||||
|
"""On upgrade/leadership change: ensure we publish the data we have."""
|
||||||
|
for relation in self.relations:
|
||||||
|
self._publish_auto_data(relation)
|
||||||
|
|
||||||
|
def is_ready(self):
|
||||||
|
"""The Requirer is ready if the Provider has sent valid data."""
|
||||||
|
try:
|
||||||
|
return bool(self._get_url_from_relation_data())
|
||||||
|
except DataValidationError as e:
|
||||||
|
log.warning("Requirer not ready; validation error encountered: %s" % str(e))
|
||||||
|
return False
|
||||||
|
|
||||||
|
def _publish_auto_data(self, relation: Relation):
|
||||||
|
if self._auto_data and self.unit.is_leader():
|
||||||
|
host, port = self._auto_data
|
||||||
|
self.provide_ingress_requirements(host=host, port=port)
|
||||||
|
|
||||||
|
def provide_ingress_requirements(self, *, host: Optional[str] = None, port: int):
|
||||||
|
"""Publishes the data that Traefik needs to provide ingress.
|
||||||
|
|
||||||
|
NB only the leader unit is supposed to do this.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
host: Hostname to be used by the ingress provider to address the
|
||||||
|
requirer unit; if unspecified, FQDN will be used instead
|
||||||
|
port: the port of the service (required)
|
||||||
|
"""
|
||||||
|
# get only the leader to publish the data since we only
|
||||||
|
# require one unit to publish it -- it will not differ between units,
|
||||||
|
# unlike in ingress-per-unit.
|
||||||
|
assert self.unit.is_leader(), "only leaders should do this."
|
||||||
|
assert self.relation, "no relation"
|
||||||
|
|
||||||
|
if not host:
|
||||||
|
host = socket.getfqdn()
|
||||||
|
|
||||||
|
data = {
|
||||||
|
"model": self.model.name,
|
||||||
|
"name": self.app.name,
|
||||||
|
"host": host,
|
||||||
|
"port": str(port),
|
||||||
|
}
|
||||||
|
|
||||||
|
if self._strip_prefix:
|
||||||
|
data["strip-prefix"] = "true"
|
||||||
|
|
||||||
|
if self._redirect_https:
|
||||||
|
data["redirect-https"] = "true"
|
||||||
|
|
||||||
|
_validate_data(data, INGRESS_REQUIRES_APP_SCHEMA)
|
||||||
|
self.relation.data[self.app].update(data)
|
||||||
|
|
||||||
|
@property
|
||||||
|
def relation(self):
|
||||||
|
"""The established Relation instance, or None."""
|
||||||
|
return self.relations[0] if self.relations else None
|
||||||
|
|
||||||
|
def _get_url_from_relation_data(self) -> Optional[str]:
|
||||||
|
"""The full ingress URL to reach the current unit.
|
||||||
|
|
||||||
|
Returns None if the URL isn't available yet.
|
||||||
|
"""
|
||||||
|
relation = self.relation
|
||||||
|
if not relation or not relation.app:
|
||||||
|
return None
|
||||||
|
|
||||||
|
# fetch the provider's app databag
|
||||||
|
try:
|
||||||
|
raw = relation.data.get(relation.app, {}).get("ingress")
|
||||||
|
except ModelError as e:
|
||||||
|
log.debug(
|
||||||
|
f"Error {e} attempting to read remote app data; "
|
||||||
|
f"probably we are in a relation_departed hook"
|
||||||
|
)
|
||||||
|
return None
|
||||||
|
|
||||||
|
if not raw:
|
||||||
|
return None
|
||||||
|
|
||||||
|
ingress: ProviderIngressData = yaml.safe_load(raw)
|
||||||
|
_validate_data({"ingress": ingress}, INGRESS_PROVIDES_APP_SCHEMA)
|
||||||
|
return ingress["url"]
|
||||||
|
|
||||||
|
@property
|
||||||
|
def url(self) -> Optional[str]:
|
||||||
|
"""The full ingress URL to reach the current unit.
|
||||||
|
|
||||||
|
Returns None if the URL isn't available yet.
|
||||||
|
"""
|
||||||
|
data = self._stored.current_url or self._get_url_from_relation_data() # type: ignore
|
||||||
|
assert isinstance(data, (str, type(None))) # for static checker
|
||||||
|
return data
|
@@ -9,18 +9,28 @@ description: |
|
|||||||
version: 3
|
version: 3
|
||||||
bases:
|
bases:
|
||||||
- name: ubuntu
|
- name: ubuntu
|
||||||
channel: 20.04/stable
|
channel: 22.04/stable
|
||||||
|
assumes:
|
||||||
|
- k8s-api
|
||||||
|
- juju >= 3.2
|
||||||
tags:
|
tags:
|
||||||
- openstack
|
- openstack
|
||||||
|
source: https://opendev.org/openstack/charm-gnocchi-k8s
|
||||||
|
issues: https://bugs.launchpad.net/charm-gnocchi-k8s
|
||||||
|
|
||||||
containers:
|
containers:
|
||||||
gnocchi-api:
|
gnocchi-api:
|
||||||
resource: gnocchi-api-image
|
resource: gnocchi-api-image
|
||||||
|
gnocchi-metricd:
|
||||||
|
resource: gnocchi-metricd-image
|
||||||
|
|
||||||
resources:
|
resources:
|
||||||
gnocchi-api-image:
|
gnocchi-api-image:
|
||||||
type: oci-image
|
type: oci-image
|
||||||
description: OCI image for OpenStack gnocchi
|
description: OCI image for OpenStack Gnocchi api service
|
||||||
|
gnocchi-metricd-image:
|
||||||
|
type: oci-image
|
||||||
|
description: OCI image for OpenStack Gnocchi Metric service
|
||||||
|
|
||||||
requires:
|
requires:
|
||||||
database:
|
database:
|
||||||
@@ -35,12 +45,8 @@ requires:
|
|||||||
ingress-public:
|
ingress-public:
|
||||||
interface: ingress
|
interface: ingress
|
||||||
limit: 1
|
limit: 1
|
||||||
amqp:
|
ceph:
|
||||||
interface: rabbitmq
|
interface: ceph-client
|
||||||
|
|
||||||
provides:
|
|
||||||
gnocchi:
|
|
||||||
interface: gnocchi
|
|
||||||
|
|
||||||
peers:
|
peers:
|
||||||
peers:
|
peers:
|
||||||
|
10
osci.yaml
Normal file
10
osci.yaml
Normal file
@@ -0,0 +1,10 @@
|
|||||||
|
- project:
|
||||||
|
templates:
|
||||||
|
- charm-publish-jobs
|
||||||
|
vars:
|
||||||
|
needs_charm_build: true
|
||||||
|
charm_build_name: gnocchi-k8s
|
||||||
|
build_type: charmcraft
|
||||||
|
publish_charm: true
|
||||||
|
charmcraft_channel: 2.0/stable
|
||||||
|
publish_channel: 2023.1/edge
|
@@ -1,3 +1,6 @@
|
|||||||
|
# Copyright 2022 Canonical Ltd.
|
||||||
|
# See LICENSE file for licensing details.
|
||||||
|
|
||||||
# Testing tools configuration
|
# Testing tools configuration
|
||||||
[tool.coverage.run]
|
[tool.coverage.run]
|
||||||
branch = true
|
branch = true
|
||||||
@@ -11,36 +14,26 @@ log_cli_level = "INFO"
|
|||||||
|
|
||||||
# Formatting tools configuration
|
# Formatting tools configuration
|
||||||
[tool.black]
|
[tool.black]
|
||||||
line-length = 99
|
line-length = 79
|
||||||
target-version = ["py38"]
|
|
||||||
|
[tool.isort]
|
||||||
|
profile = "black"
|
||||||
|
multi_line_output = 3
|
||||||
|
force_grid_wrap = true
|
||||||
|
|
||||||
# Linting tools configuration
|
# Linting tools configuration
|
||||||
[tool.ruff]
|
[tool.flake8]
|
||||||
line-length = 99
|
max-line-length = 79
|
||||||
select = ["E", "W", "F", "C", "N", "D", "I001"]
|
max-doc-length = 99
|
||||||
extend-ignore = [
|
|
||||||
"D203",
|
|
||||||
"D204",
|
|
||||||
"D213",
|
|
||||||
"D215",
|
|
||||||
"D400",
|
|
||||||
"D404",
|
|
||||||
"D406",
|
|
||||||
"D407",
|
|
||||||
"D408",
|
|
||||||
"D409",
|
|
||||||
"D413",
|
|
||||||
]
|
|
||||||
ignore = ["E501", "D107"]
|
|
||||||
extend-exclude = ["__pycache__", "*.egg_info"]
|
|
||||||
per-file-ignores = {"tests/*" = ["D100","D101","D102","D103","D104"]}
|
|
||||||
|
|
||||||
[tool.ruff.mccabe]
|
|
||||||
max-complexity = 10
|
max-complexity = 10
|
||||||
|
exclude = [".git", "__pycache__", ".tox", "build", "dist", "*.egg_info", "venv"]
|
||||||
[tool.codespell]
|
select = ["E", "W", "F", "C", "N", "R", "D", "H"]
|
||||||
skip = "build,lib,venv,icon.svg,.tox,.git,.mypy_cache,.ruff_cache,.coverage"
|
# Ignore W503, E501 because using black creates errors with this
|
||||||
|
# Ignore D107 Missing docstring in __init__
|
||||||
[tool.pyright]
|
ignore = ["W503", "E501", "D107", "E402"]
|
||||||
include = ["src/**.py"]
|
per-file-ignores = []
|
||||||
|
docstring-convention = "google"
|
||||||
|
# Check for properly formatted copyright header in each file
|
||||||
|
copyright-check = "True"
|
||||||
|
copyright-author = "Canonical Ltd."
|
||||||
|
copyright-regexp = "Copyright\\s\\d{4}([-,]\\d{4})*\\s+%(author)s"
|
||||||
|
13
rename.sh
Executable file
13
rename.sh
Executable file
@@ -0,0 +1,13 @@
|
|||||||
|
#!/bin/bash
# Rename the charmcraft build artifact (<name>_<base>.charm) to the plain
# <name>.charm filename expected by the functional-test tooling.
#
# Fail fast on any error (missing osci.yaml, failed mv, ...) instead of
# silently continuing as the original did.
set -e

charm=$(grep "charm_build_name" osci.yaml | awk '{print $2}')
echo "renaming ${charm}_*.charm to ${charm}.charm"
echo -n "pwd: "
pwd
ls -al
echo "Removing bad downloaded charm maybe?"
if [[ -e "${charm}.charm" ]];
then
    rm "${charm}.charm"
fi
echo "Renaming charm here."
# Source glob must stay unquoted so it expands; destination is quoted.
mv ${charm}_*.charm "${charm}.charm"
|
@@ -2,7 +2,8 @@ ops
|
|||||||
jinja2
|
jinja2
|
||||||
git+https://github.com/openstack/charm-ops-sunbeam#egg=ops_sunbeam
|
git+https://github.com/openstack/charm-ops-sunbeam#egg=ops_sunbeam
|
||||||
lightkube
|
lightkube
|
||||||
# These are only needeed if the charm relates to ceph
|
netifaces
|
||||||
|
jsonschema
|
||||||
|
|
||||||
git+https://github.com/openstack/charm-ops-interface-ceph-client#egg=interface_ceph_client
|
git+https://github.com/openstack/charm-ops-interface-ceph-client#egg=interface_ceph_client
|
||||||
# Charmhelpers is only present as interface_ceph_client uses it.
|
|
||||||
git+https://github.com/juju/charm-helpers.git#egg=charmhelpers
|
git+https://github.com/juju/charm-helpers.git#egg=charmhelpers
|
||||||
|
279
src/charm.py
279
src/charm.py
@@ -1,61 +1,304 @@
|
|||||||
#!/usr/bin/env python3
|
#!/usr/bin/env python3
|
||||||
|
# Copyright 2023 Canonical Ltd.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
"""Gnocchi Operator Charm.
|
"""Gnocchi Operator Charm.
|
||||||
|
|
||||||
This charm provide Gnocchi services as part of an OpenStack deployment
|
This charm provide Gnocchi services as part of an OpenStack deployment
|
||||||
"""
|
"""
|
||||||
|
|
||||||
import logging
|
import logging
|
||||||
|
from typing import (
|
||||||
|
List,
|
||||||
|
)
|
||||||
|
|
||||||
from ops.framework import StoredState
|
import ops
|
||||||
from ops.main import main
|
|
||||||
|
|
||||||
import ops_sunbeam.charm as sunbeam_charm
|
import ops_sunbeam.charm as sunbeam_charm
|
||||||
|
import ops_sunbeam.config_contexts as sunbeam_ctxts
|
||||||
|
import ops_sunbeam.container_handlers as sunbeam_chandlers
|
||||||
|
import ops_sunbeam.core as sunbeam_core
|
||||||
|
import ops_sunbeam.guard as sunbeam_guard
|
||||||
|
import ops_sunbeam.relation_handlers as sunbeam_rhandlers
|
||||||
|
from ops.framework import (
|
||||||
|
StoredState,
|
||||||
|
)
|
||||||
|
from ops.main import (
|
||||||
|
main,
|
||||||
|
)
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
GNOCHHI_WSGI_CONTAINER = "gnocchi-api"
|
||||||
|
GNOCCHI_METRICD_CONTAINER = "gnocchi-metricd"
|
||||||
|
|
||||||
|
|
||||||
|
class GnocchiWSGIPebbleHandler(sunbeam_chandlers.WSGIPebbleHandler):
|
||||||
|
"""Pebble handler for Gnocchi WSGI services."""
|
||||||
|
|
||||||
|
def init_service(self, context) -> None:
|
||||||
|
"""Enable and start WSGI service."""
|
||||||
|
self.write_config(context)
|
||||||
|
try:
|
||||||
|
self.execute(["a2dissite", "gnocchi-api"], exception_on_error=True)
|
||||||
|
self.execute(
|
||||||
|
["a2ensite", self.wsgi_service_name], exception_on_error=True
|
||||||
|
)
|
||||||
|
except ops.pebble.ExecError:
|
||||||
|
logger.exception(
|
||||||
|
f"Failed to enable {self.wsgi_service_name} site in apache"
|
||||||
|
)
|
||||||
|
# ignore for now - pebble is raising an exited too quickly, but it
|
||||||
|
# appears to work properly.
|
||||||
|
self.start_wsgi()
|
||||||
|
|
||||||
|
def default_container_configs(
|
||||||
|
self,
|
||||||
|
) -> List[sunbeam_core.ContainerConfigFile]:
|
||||||
|
"""Container configurations for handler."""
|
||||||
|
_cconfigs = super().default_container_configs()
|
||||||
|
_cconfigs.extend(
|
||||||
|
[
|
||||||
|
sunbeam_core.ContainerConfigFile(
|
||||||
|
"/etc/gnocchi/api-paste.ini",
|
||||||
|
self.charm.service_user,
|
||||||
|
self.charm.service_group,
|
||||||
|
0o640,
|
||||||
|
),
|
||||||
|
]
|
||||||
|
)
|
||||||
|
_cconfigs.extend(self.charm.default_container_configs())
|
||||||
|
return _cconfigs
|
||||||
|
|
||||||
|
|
||||||
|
class GnocchiMetricdPebbleHandler(sunbeam_chandlers.ServicePebbleHandler):
|
||||||
|
"""Pebble handler for Gnocchi metricd container."""
|
||||||
|
|
||||||
|
def get_layer(self):
|
||||||
|
"""Gnocchi Metricd service.
|
||||||
|
|
||||||
|
:returns: pebble service layer configuration for gnocchi metricd
|
||||||
|
service
|
||||||
|
:rtype: dict
|
||||||
|
"""
|
||||||
|
return {
|
||||||
|
"summary": "gnocchi metricd layer",
|
||||||
|
"description": "pebble configuration for gnocchi metricd service",
|
||||||
|
"services": {
|
||||||
|
"gnocchi-metricd": {
|
||||||
|
"override": "replace",
|
||||||
|
"summary": "Gnocchi Metricd",
|
||||||
|
"command": "gnocchi-metricd",
|
||||||
|
"startup": "enabled",
|
||||||
|
"user": self.charm.service_user,
|
||||||
|
"group": self.charm.service_group,
|
||||||
|
}
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
def default_container_configs(
|
||||||
|
self,
|
||||||
|
) -> List[sunbeam_core.ContainerConfigFile]:
|
||||||
|
"""Container configurations for handler."""
|
||||||
|
_cconfigs = super().default_container_configs()
|
||||||
|
_cconfigs.extend(self.charm.default_container_configs())
|
||||||
|
return _cconfigs
|
||||||
|
|
||||||
|
|
||||||
class GnocchiOperatorCharm(sunbeam_charm.OSBaseOperatorAPICharm):
|
class GnocchiOperatorCharm(sunbeam_charm.OSBaseOperatorAPICharm):
|
||||||
"""Charm the service."""
|
"""Charm the service."""
|
||||||
|
|
||||||
_state = StoredState()
|
_state = StoredState()
|
||||||
service_name = "gnocchi-api"
|
service_name = "gnocchi-api"
|
||||||
wsgi_admin_script = '/usr/bin/gnocchi-api-wsgi'
|
wsgi_admin_script = "/usr/bin/gnocchi-api"
|
||||||
wsgi_public_script = '/usr/bin/gnocchi-api-wsgi'
|
wsgi_public_script = "/usr/bin/gnocchi-api"
|
||||||
|
|
||||||
db_sync_cmds = [
|
db_sync_cmds = [["gnocchi-upgrade"]]
|
||||||
['/snap/bin/gnocchi.upgrade', '--log-file=/var/snap/gnocchi/common/log/gnocchi-upgrade.log']
|
|
||||||
]
|
mandatory_relations = {
|
||||||
|
"database",
|
||||||
|
"identity-service",
|
||||||
|
"ingress-public",
|
||||||
|
"ceph",
|
||||||
|
}
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def service_conf(self) -> str:
|
def service_conf(self) -> str:
|
||||||
"""Service default configuration file."""
|
"""Service default configuration file."""
|
||||||
return f"/etc/gnocchi/gnocchi.conf"
|
return "/etc/gnocchi/gnocchi.conf"
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def service_user(self) -> str:
|
def service_user(self) -> str:
|
||||||
"""Service user file and directory ownership."""
|
"""Service user file and directory ownership."""
|
||||||
return 'gnocchi'
|
return "gnocchi"
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def service_group(self) -> str:
|
def service_group(self) -> str:
|
||||||
"""Service group file and directory ownership."""
|
"""Service group file and directory ownership."""
|
||||||
return 'gnocchi'
|
return "gnocchi"
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def service_endpoints(self):
|
def service_endpoints(self):
|
||||||
|
"""Return service endpoints for the service."""
|
||||||
return [
|
return [
|
||||||
{
|
{
|
||||||
'service_name': 'gnocchi',
|
"service_name": "gnocchi",
|
||||||
'type': 'gnocchi',
|
"type": "gnocchi",
|
||||||
'description': "OpenStack Gnocchi API",
|
"description": "OpenStack Gnocchi API",
|
||||||
'internal_url': f'{self.internal_url}',
|
"internal_url": f"{self.internal_url}",
|
||||||
'public_url': f'{self.public_url}',
|
"public_url": f"{self.public_url}",
|
||||||
'admin_url': f'{self.admin_url}'}]
|
"admin_url": f"{self.admin_url}",
|
||||||
|
}
|
||||||
|
]
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def default_public_ingress_port(self):
|
def default_public_ingress_port(self):
|
||||||
|
"""Ingress Port for API service."""
|
||||||
return 8041
|
return 8041
|
||||||
|
|
||||||
|
@property
|
||||||
|
def healthcheck_http_url(self) -> str:
|
||||||
|
"""Healthcheck HTTP URL for the service."""
|
||||||
|
return (
|
||||||
|
f"http://localhost:{self.default_public_ingress_port}/healthcheck"
|
||||||
|
)
|
||||||
|
|
||||||
|
def get_pebble_handlers(
|
||||||
|
self,
|
||||||
|
) -> List[sunbeam_chandlers.ServicePebbleHandler]:
|
||||||
|
"""Pebble handlers for operator."""
|
||||||
|
pebble_handlers = [
|
||||||
|
GnocchiWSGIPebbleHandler(
|
||||||
|
self,
|
||||||
|
GNOCHHI_WSGI_CONTAINER,
|
||||||
|
self.service_name,
|
||||||
|
[],
|
||||||
|
self.template_dir,
|
||||||
|
self.configure_charm,
|
||||||
|
f"wsgi-{self.service_name}",
|
||||||
|
),
|
||||||
|
GnocchiMetricdPebbleHandler(
|
||||||
|
self,
|
||||||
|
GNOCCHI_METRICD_CONTAINER,
|
||||||
|
"gnocchi-metricd",
|
||||||
|
[],
|
||||||
|
self.template_dir,
|
||||||
|
self.configure_charm,
|
||||||
|
),
|
||||||
|
]
|
||||||
|
return pebble_handlers
|
||||||
|
|
||||||
|
def default_container_configs(
|
||||||
|
self,
|
||||||
|
) -> List[sunbeam_core.ContainerConfigFile]:
|
||||||
|
"""Container configurations for handler."""
|
||||||
|
# Update with configs that are common for all containers
|
||||||
|
return [
|
||||||
|
sunbeam_core.ContainerConfigFile(
|
||||||
|
"/etc/gnocchi/gnocchi.conf",
|
||||||
|
self.service_user,
|
||||||
|
self.service_group,
|
||||||
|
0o640,
|
||||||
|
),
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
class GnocchiCephOperatorCharm(GnocchiOperatorCharm):
|
||||||
|
"""Charm the Gnocchi service with Ceph backend."""
|
||||||
|
|
||||||
|
@property
|
||||||
|
def config_contexts(self) -> List[sunbeam_ctxts.ConfigContext]:
|
||||||
|
"""Configuration contexts for the operator."""
|
||||||
|
contexts = super().config_contexts
|
||||||
|
contexts.append(
|
||||||
|
sunbeam_ctxts.CephConfigurationContext(self, "ceph_config")
|
||||||
|
)
|
||||||
|
return contexts
|
||||||
|
|
||||||
|
def get_relation_handlers(self) -> List[sunbeam_rhandlers.RelationHandler]:
|
||||||
|
"""Relation handlers for the service."""
|
||||||
|
handlers = super().get_relation_handlers()
|
||||||
|
self.ceph = sunbeam_rhandlers.CephClientHandler(
|
||||||
|
self,
|
||||||
|
"ceph",
|
||||||
|
self.configure_charm,
|
||||||
|
allow_ec_overwrites=True,
|
||||||
|
app_name="rbd",
|
||||||
|
mandatory="ceph" in self.mandatory_relations,
|
||||||
|
)
|
||||||
|
handlers.append(self.ceph)
|
||||||
|
return handlers
|
||||||
|
|
||||||
|
def init_container_services(self):
|
||||||
|
"""Setp ceph keyring and init pebble handlers that are ready."""
|
||||||
|
for ph in self.pebble_handlers:
|
||||||
|
if ph.pebble_ready:
|
||||||
|
ph.execute(
|
||||||
|
[
|
||||||
|
"ceph-authtool",
|
||||||
|
f"/etc/ceph/ceph.client.{self.app.name}.keyring",
|
||||||
|
"--create-keyring",
|
||||||
|
f"--name=client.{self.app.name}",
|
||||||
|
f"--add-key={self.ceph.key}",
|
||||||
|
],
|
||||||
|
exception_on_error=True,
|
||||||
|
)
|
||||||
|
ph.execute(
|
||||||
|
[
|
||||||
|
"chown",
|
||||||
|
f"{self.service_user}:{self.service_group}",
|
||||||
|
f"/etc/ceph/ceph.client.{self.app.name}.keyring",
|
||||||
|
"/etc/ceph/rbdmap",
|
||||||
|
],
|
||||||
|
exception_on_error=True,
|
||||||
|
)
|
||||||
|
ph.execute(
|
||||||
|
[
|
||||||
|
"chmod",
|
||||||
|
"640",
|
||||||
|
f"/etc/ceph/ceph.client.{self.app.name}.keyring",
|
||||||
|
"/etc/ceph/rbdmap",
|
||||||
|
],
|
||||||
|
exception_on_error=True,
|
||||||
|
)
|
||||||
|
ph.init_service(self.contexts())
|
||||||
|
else:
|
||||||
|
logging.debug(
|
||||||
|
f"Not running init for {ph.service_name},"
|
||||||
|
" container not ready"
|
||||||
|
)
|
||||||
|
raise sunbeam_guard.WaitingExceptionError(
|
||||||
|
"Payload container not ready"
|
||||||
|
)
|
||||||
|
super().init_container_services()
|
||||||
|
|
||||||
|
def default_container_configs(
|
||||||
|
self,
|
||||||
|
) -> List[sunbeam_core.ContainerConfigFile]:
|
||||||
|
"""Container configurations for handler."""
|
||||||
|
_cconfigs = super().default_container_configs()
|
||||||
|
_cconfigs.extend(
|
||||||
|
[
|
||||||
|
sunbeam_core.ContainerConfigFile(
|
||||||
|
"/etc/ceph/ceph.conf",
|
||||||
|
self.service_user,
|
||||||
|
self.service_group,
|
||||||
|
0o640,
|
||||||
|
),
|
||||||
|
]
|
||||||
|
)
|
||||||
|
return _cconfigs
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
main(GnocchiOperatorCharm)
|
main(GnocchiCephOperatorCharm)
|
||||||
|
61
src/templates/api-paste.ini.j2
Normal file
61
src/templates/api-paste.ini.j2
Normal file
@@ -0,0 +1,61 @@
|
|||||||
|
[composite:gnocchi+basic]
|
||||||
|
use = egg:Paste#urlmap
|
||||||
|
/ = gnocchiversions_pipeline
|
||||||
|
/v1 = gnocchiv1+noauth
|
||||||
|
/healthcheck = healthcheck
|
||||||
|
{% if ingress_public.ingress_path -%}
|
||||||
|
{{ ingress_public.ingress_path }}: gnocchiversions_pipeline
|
||||||
|
{{ ingress_public.ingress_path }}/v1 = gnocchiv1+noauth
|
||||||
|
{{ ingress_public.ingress_path }}/healthcheck = healthcheck
|
||||||
|
{% endif -%}
|
||||||
|
|
||||||
|
[composite:gnocchi+keystone]
|
||||||
|
use = egg:Paste#urlmap
|
||||||
|
/ = gnocchiversions_pipeline
|
||||||
|
/v1 = gnocchiv1+keystone
|
||||||
|
/healthcheck = healthcheck
|
||||||
|
{% if ingress_public.ingress_path -%}
|
||||||
|
{{ ingress_public.ingress_path }}: gnocchiversions_pipeline
|
||||||
|
{{ ingress_public.ingress_path }}/v1 = gnocchiv1+keystone
|
||||||
|
{{ ingress_public.ingress_path }}/healthcheck = healthcheck
|
||||||
|
{% endif -%}
|
||||||
|
|
||||||
|
[composite:gnocchi+remoteuser]
|
||||||
|
use = egg:Paste#urlmap
|
||||||
|
/ = gnocchiversions_pipeline
|
||||||
|
/v1 = gnocchiv1+noauth
|
||||||
|
/healthcheck = healthcheck
|
||||||
|
{% if ingress_public.ingress_path -%}
|
||||||
|
{{ ingress_public.ingress_path }}: gnocchiversions_pipeline
|
||||||
|
{{ ingress_public.ingress_path }}/v1 = gnocchiv1+noauth
|
||||||
|
{{ ingress_public.ingress_path }}/healthcheck = healthcheck
|
||||||
|
{% endif -%}
|
||||||
|
|
||||||
|
[pipeline:gnocchiv1+noauth]
|
||||||
|
pipeline = http_proxy_to_wsgi gnocchiv1
|
||||||
|
|
||||||
|
[pipeline:gnocchiv1+keystone]
|
||||||
|
pipeline = http_proxy_to_wsgi keystone_authtoken gnocchiv1
|
||||||
|
|
||||||
|
[pipeline:gnocchiversions_pipeline]
|
||||||
|
pipeline = http_proxy_to_wsgi gnocchiversions
|
||||||
|
|
||||||
|
[app:gnocchiversions]
|
||||||
|
paste.app_factory = gnocchi.rest.app:app_factory
|
||||||
|
root = gnocchi.rest.api.VersionsController
|
||||||
|
|
||||||
|
[app:gnocchiv1]
|
||||||
|
paste.app_factory = gnocchi.rest.app:app_factory
|
||||||
|
root = gnocchi.rest.api.V1Controller
|
||||||
|
|
||||||
|
[filter:keystone_authtoken]
|
||||||
|
use = egg:keystonemiddleware#auth_token
|
||||||
|
oslo_config_project = gnocchi
|
||||||
|
|
||||||
|
[filter:http_proxy_to_wsgi]
|
||||||
|
use = egg:oslo.middleware#http_proxy_to_wsgi
|
||||||
|
oslo_config_project = gnocchi
|
||||||
|
|
||||||
|
[app:healthcheck]
|
||||||
|
use = egg:oslo.middleware#healthcheck
|
||||||
|
oslo_config_project = gnocchi
|
20
src/templates/gnocchi.conf.j2
Normal file
20
src/templates/gnocchi.conf.j2
Normal file
@@ -0,0 +1,20 @@
|
|||||||
|
[DEFAULT]
|
||||||
|
debug = {{ options.debug }}
|
||||||
|
|
||||||
|
[api]
|
||||||
|
auth_mode = keystone
|
||||||
|
|
||||||
|
[indexer]
|
||||||
|
{% if database.connection -%}
|
||||||
|
url = {{ database.connection }}
|
||||||
|
{% endif -%}
|
||||||
|
|
||||||
|
|
||||||
|
[storage]
|
||||||
|
driver = ceph
|
||||||
|
ceph_pool = gnocchi
|
||||||
|
ceph_username = gnocchi
|
||||||
|
ceph_conffile = /etc/ceph/ceph.conf
|
||||||
|
|
||||||
|
|
||||||
|
{% include "parts/section-identity" %}
|
@@ -1,10 +1,23 @@
|
|||||||
{% if identity_service.internal_host -%}
|
{% if identity_service.admin_auth_url -%}
|
||||||
www_authenticate_uri = {{ identity_service.internal_protocol }}://{{ identity_service.internal_host }}:{{ identity_service.internal_port }}
|
auth_url = {{ identity_service.admin_auth_url }}
|
||||||
|
interface = admin
|
||||||
|
{% elif identity_service.internal_auth_url -%}
|
||||||
|
auth_url = {{ identity_service.internal_auth_url }}
|
||||||
|
interface = internal
|
||||||
|
{% elif identity_service.internal_host -%}
|
||||||
auth_url = {{ identity_service.internal_protocol }}://{{ identity_service.internal_host }}:{{ identity_service.internal_port }}
|
auth_url = {{ identity_service.internal_protocol }}://{{ identity_service.internal_host }}:{{ identity_service.internal_port }}
|
||||||
|
interface = internal
|
||||||
|
{% endif -%}
|
||||||
|
{% if identity_service.public_auth_url -%}
|
||||||
|
www_authenticate_uri = {{ identity_service.public_auth_url }}
|
||||||
|
{% elif identity_service.internal_host -%}
|
||||||
|
www_authenticate_uri = {{ identity_service.internal_protocol }}://{{ identity_service.internal_host }}:{{ identity_service.internal_port }}
|
||||||
|
{% endif -%}
|
||||||
auth_type = password
|
auth_type = password
|
||||||
project_domain_name = {{ identity_service.service_domain_name }}
|
project_domain_name = {{ identity_service.service_domain_name }}
|
||||||
user_domain_name = {{ identity_service.service_domain_name }}
|
user_domain_name = {{ identity_service.service_domain_name }}
|
||||||
project_name = {{ identity_service.service_project_name }}
|
project_name = {{ identity_service.service_project_name }}
|
||||||
username = {{ identity_service.service_user_name }}
|
username = {{ identity_service.service_user_name }}
|
||||||
password = {{ identity_service.service_password }}
|
password = {{ identity_service.service_password }}
|
||||||
{% endif -%}
|
service_token_roles = {{ identity_service.admin_role }}
|
||||||
|
service_token_roles_required = True
|
||||||
|
@@ -1,10 +1,11 @@
|
|||||||
Listen {{ wsgi_config.public_port }}
|
Listen {{ wsgi_config.public_port }}
|
||||||
|
|
||||||
<VirtualHost *:{{ wsgi_config.public_port }}>
|
<VirtualHost *:{{ wsgi_config.public_port }}>
|
||||||
WSGIDaemonProcess {{ wsgi_config.group }} processes=3 threads=1 user={{ wsgi_config.user }} group={{ wsgi_config.group }} \
|
WSGIDaemonProcess {{ wsgi_config.group }} processes=3 threads=1 user={{ wsgi_config.user }} group={{ wsgi_config.group }} \
|
||||||
display-name=%{GROUP}
|
display-name=%{GROUP}
|
||||||
WSGIProcessGroup {{ wsgi_config.group }}
|
WSGIProcessGroup {{ wsgi_config.group }}
|
||||||
{% if ingress_internal.ingress_path -%}
|
{% if ingress_internal.ingress_path -%}
|
||||||
WSGIScriptAlias {{ ingress_internal.ingress_path }} {{ wsgi_config.wsgi_public_script }}
|
WSGIScriptAlias {{ ingress_public.ingress_path }} {{ wsgi_config.wsgi_public_script }}
|
||||||
{% endif -%}
|
{% endif -%}
|
||||||
WSGIScriptAlias / {{ wsgi_config.wsgi_public_script }}
|
WSGIScriptAlias / {{ wsgi_config.wsgi_public_script }}
|
||||||
WSGIApplicationGroup %{GLOBAL}
|
WSGIApplicationGroup %{GLOBAL}
|
||||||
|
@@ -1,27 +0,0 @@
|
|||||||
Listen {{ wsgi_config.public_port }}
|
|
||||||
<VirtualHost *:{{ wsgi_config.public_port }}>
|
|
||||||
WSGIDaemonProcess {{ wsgi_config.group }} processes=3 threads=1 user={{ wsgi_config.user }} group={{ wsgi_config.group }} \
|
|
||||||
display-name=%{GROUP}
|
|
||||||
WSGIProcessGroup {{ wsgi_config.group }}
|
|
||||||
{% if ingress_internal.ingress_path -%}
|
|
||||||
WSGIScriptAlias {{ ingress_internal.ingress_path }} {{ wsgi_config.wsgi_public_script }}
|
|
||||||
{% endif -%}
|
|
||||||
WSGIScriptAlias / {{ wsgi_config.wsgi_public_script }}
|
|
||||||
WSGIApplicationGroup %{GLOBAL}
|
|
||||||
WSGIPassAuthorization On
|
|
||||||
<IfVersion >= 2.4>
|
|
||||||
ErrorLogFormat "%{cu}t %M"
|
|
||||||
</IfVersion>
|
|
||||||
ErrorLog {{ wsgi_config.error_log }}
|
|
||||||
CustomLog {{ wsgi_config.custom_log }} combined
|
|
||||||
|
|
||||||
<Directory /usr/bin>
|
|
||||||
<IfVersion >= 2.4>
|
|
||||||
Require all granted
|
|
||||||
</IfVersion>
|
|
||||||
<IfVersion < 2.4>
|
|
||||||
Order allow,deny
|
|
||||||
Allow from all
|
|
||||||
</IfVersion>
|
|
||||||
</Directory>
|
|
||||||
</VirtualHost>
|
|
@@ -1,17 +1,9 @@
|
|||||||
# This file is managed centrally. If you find the need to modify this as a
|
# This file is managed centrally. If you find the need to modify this as a
|
||||||
# one-off, please don't. Intead, consult #openstack-charms and ask about
|
# one-off, please don't. Intead, consult #openstack-charms and ask about
|
||||||
# requirements management in charms via bot-control. Thank you.
|
# requirements management in charms via bot-control. Thank you.
|
||||||
charm-tools>=2.4.4
|
|
||||||
coverage>=3.6
|
coverage
|
||||||
mock>=1.2
|
mock
|
||||||
flake8>=2.2.4,<=2.4.1
|
flake8
|
||||||
pyflakes==2.1.1
|
stestr
|
||||||
stestr>=2.2.0
|
ops
|
||||||
requests>=2.18.4
|
|
||||||
psutil
|
|
||||||
# oslo.i18n dropped py35 support
|
|
||||||
oslo.i18n<4.0.0
|
|
||||||
git+https://github.com/openstack-charmers/zaza.git#egg=zaza
|
|
||||||
git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack
|
|
||||||
pytz # workaround for 14.04 pip/tox
|
|
||||||
pyudev # for ceph-* charm unit tests (not mocked?)
|
|
||||||
|
62
tests/bundles/smoke.yaml
Normal file
62
tests/bundles/smoke.yaml
Normal file
@@ -0,0 +1,62 @@
|
|||||||
|
bundle: kubernetes
|
||||||
|
applications:
|
||||||
|
|
||||||
|
mysql:
|
||||||
|
charm: ch:mysql-k8s
|
||||||
|
channel: 8.0/stable
|
||||||
|
scale: 1
|
||||||
|
trust: false
|
||||||
|
constraints: mem=1G
|
||||||
|
|
||||||
|
# Currently traefik is required for networking things.
|
||||||
|
# If this isn't present, the units will hang at "installing agent".
|
||||||
|
traefik:
|
||||||
|
charm: ch:traefik-k8s
|
||||||
|
channel: 1.0/stable
|
||||||
|
scale: 1
|
||||||
|
trust: true
|
||||||
|
|
||||||
|
traefik-public:
|
||||||
|
charm: ch:traefik-k8s
|
||||||
|
channel: 1.0/stable
|
||||||
|
scale: 1
|
||||||
|
trust: true
|
||||||
|
options:
|
||||||
|
kubernetes-service-annotations: metallb.universe.tf/address-pool=public
|
||||||
|
|
||||||
|
keystone:
|
||||||
|
charm: ch:keystone-k8s
|
||||||
|
channel: 2023.1/edge
|
||||||
|
scale: 1
|
||||||
|
trust: true
|
||||||
|
options:
|
||||||
|
admin-role: admin
|
||||||
|
storage:
|
||||||
|
fernet-keys: 5M
|
||||||
|
credential-keys: 5M
|
||||||
|
|
||||||
|
gnocchi:
|
||||||
|
charm: ../../gnocchi-k8s.charm
|
||||||
|
scale: 1
|
||||||
|
trust: true
|
||||||
|
resources:
|
||||||
|
gnocchi-api-image: ghcr.io/openstack-snaps/gnocchi-api:2023.1
|
||||||
|
gnocchi-metricd-image: ghcr.io/openstack-snaps/gnocchi-metricd:2023.1
|
||||||
|
|
||||||
|
relations:
|
||||||
|
- - traefik:ingress
|
||||||
|
- keystone:ingress-internal
|
||||||
|
- - traefik-public:ingress
|
||||||
|
- keystone:ingress-public
|
||||||
|
|
||||||
|
- - mysql:database
|
||||||
|
- keystone:database
|
||||||
|
|
||||||
|
- - mysql:database
|
||||||
|
- gnocchi:database
|
||||||
|
- - keystone:identity-service
|
||||||
|
- gnocchi:identity-service
|
||||||
|
- - traefik:ingress
|
||||||
|
- gnocchi:ingress-internal
|
||||||
|
- - traefik-public:ingress
|
||||||
|
- gnocchi:ingress-public
|
1
tests/config.yaml
Symbolic link
1
tests/config.yaml
Symbolic link
@@ -0,0 +1 @@
|
|||||||
|
../config.yaml
|
@@ -1,35 +0,0 @@
|
|||||||
#!/usr/bin/env python3
|
|
||||||
# Copyright 2023 Canonical Ltd.
|
|
||||||
# See LICENSE file for licensing details.
|
|
||||||
|
|
||||||
import asyncio
|
|
||||||
import logging
|
|
||||||
from pathlib import Path
|
|
||||||
|
|
||||||
import pytest
|
|
||||||
import yaml
|
|
||||||
from pytest_operator.plugin import OpsTest
|
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
METADATA = yaml.safe_load(Path("./metadata.yaml").read_text())
|
|
||||||
APP_NAME = METADATA["name"]
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.abort_on_fail
|
|
||||||
async def test_build_and_deploy(ops_test: OpsTest):
|
|
||||||
"""Build the charm-under-test and deploy it together with related charms.
|
|
||||||
|
|
||||||
Assert on the unit status before any relations/configurations take place.
|
|
||||||
"""
|
|
||||||
# Build and deploy charm from local source folder
|
|
||||||
charm = await ops_test.build_charm(".")
|
|
||||||
resources = {"httpbin-image": METADATA["resources"]["httpbin-image"]["upstream-source"]}
|
|
||||||
|
|
||||||
# Deploy the charm and wait for active/idle status
|
|
||||||
await asyncio.gather(
|
|
||||||
ops_test.model.deploy(charm, resources=resources, application_name=APP_NAME),
|
|
||||||
ops_test.model.wait_for_idle(
|
|
||||||
apps=[APP_NAME], status="active", raise_on_blocked=True, timeout=1000
|
|
||||||
),
|
|
||||||
)
|
|
35
tests/tests.yaml
Normal file
35
tests/tests.yaml
Normal file
@@ -0,0 +1,35 @@
|
|||||||
|
gate_bundles:
|
||||||
|
- smoke
|
||||||
|
smoke_bundles:
|
||||||
|
- smoke
|
||||||
|
# There is no storage provider at the moment so cannot run tests.
|
||||||
|
configure:
|
||||||
|
- zaza.charm_tests.noop.setup.basic_setup
|
||||||
|
tests:
|
||||||
|
- zaza.charm_tests.noop.tests.NoopTest
|
||||||
|
tests_options:
|
||||||
|
trust:
|
||||||
|
- smoke
|
||||||
|
ignore_hard_deploy_errors:
|
||||||
|
- smoke
|
||||||
|
|
||||||
|
tempest:
|
||||||
|
default:
|
||||||
|
smoke: True
|
||||||
|
|
||||||
|
target_deploy_status: []
|
||||||
|
traefik:
|
||||||
|
workload-status: active
|
||||||
|
workload-status-message-regex: '^$'
|
||||||
|
traefik-public:
|
||||||
|
workload-status: active
|
||||||
|
workload-status-message-regex: '^$'
|
||||||
|
keystone:
|
||||||
|
workload-status: active
|
||||||
|
workload-status-message-regex: '^$'
|
||||||
|
mysql:
|
||||||
|
workload-status: active
|
||||||
|
workload-status-message-regex: '^.*$'
|
||||||
|
gnocchi:
|
||||||
|
workload-status: blocked
|
||||||
|
workload-status-message-regex: '^.*ceph.*$'
|
17
tests/unit/__init__.py
Normal file
17
tests/unit/__init__.py
Normal file
@@ -0,0 +1,17 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
|
||||||
|
# Copyright 2023 Canonical Ltd.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
"""Unit tests for Gnocchi operator."""
|
@@ -1,68 +1,136 @@
|
|||||||
# Copyright 2023 Canonical Ltd.
|
#!/usr/bin/env python3
|
||||||
# See LICENSE file for licensing details.
|
|
||||||
|
# Copyright 2021 Canonical Ltd.
|
||||||
#
|
#
|
||||||
# Learn more about testing at: https://juju.is/docs/sdk/testing
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
import unittest
|
"""Tests for gnocchi charm."""
|
||||||
|
|
||||||
import ops
|
import ops_sunbeam.test_utils as test_utils
|
||||||
import ops.testing
|
|
||||||
from charm import GnocchiK8SCharm
|
import charm
|
||||||
|
|
||||||
|
|
||||||
class TestCharm(unittest.TestCase):
|
class _GnocchiCephOperatorCharm(charm.GnocchiCephOperatorCharm):
|
||||||
|
def __init__(self, framework):
|
||||||
|
self.seen_events = []
|
||||||
|
super().__init__(framework)
|
||||||
|
|
||||||
|
def _log_event(self, event):
|
||||||
|
self.seen_events.append(type(event).__name__)
|
||||||
|
|
||||||
|
def configure_charm(self, event):
|
||||||
|
super().configure_charm(event)
|
||||||
|
self._log_event(event)
|
||||||
|
|
||||||
|
@property
|
||||||
|
def public_ingress_address(self):
|
||||||
|
return "gnocchi.juju"
|
||||||
|
|
||||||
|
|
||||||
|
class TestGnocchiCephOperatorCharm(test_utils.CharmTestCase):
|
||||||
|
"""Class for testing gnocchi charm."""
|
||||||
|
|
||||||
|
PATCHES = []
|
||||||
|
|
||||||
def setUp(self):
|
def setUp(self):
|
||||||
self.harness = ops.testing.Harness(GnocchiK8SCharm)
|
"""Run setup for unit tests."""
|
||||||
|
super().setUp(charm, self.PATCHES)
|
||||||
|
self.harness = test_utils.get_harness(
|
||||||
|
_GnocchiCephOperatorCharm, container_calls=self.container_calls
|
||||||
|
)
|
||||||
|
|
||||||
|
# clean up events that were dynamically defined,
|
||||||
|
# otherwise we get issues because they'll be redefined,
|
||||||
|
# which is not allowed.
|
||||||
|
from charms.data_platform_libs.v0.database_requires import (
|
||||||
|
DatabaseEvents,
|
||||||
|
)
|
||||||
|
|
||||||
|
for attr in (
|
||||||
|
"database_database_created",
|
||||||
|
"database_endpoints_changed",
|
||||||
|
"database_read_only_endpoints_changed",
|
||||||
|
):
|
||||||
|
try:
|
||||||
|
delattr(DatabaseEvents, attr)
|
||||||
|
except AttributeError:
|
||||||
|
pass
|
||||||
|
|
||||||
self.addCleanup(self.harness.cleanup)
|
self.addCleanup(self.harness.cleanup)
|
||||||
|
test_utils.add_complete_ingress_relation(self.harness)
|
||||||
|
|
||||||
|
def test_pebble_ready_handler(self):
|
||||||
|
"""Test Pebble ready event is captured."""
|
||||||
self.harness.begin()
|
self.harness.begin()
|
||||||
|
self.assertEqual(self.harness.charm.seen_events, [])
|
||||||
|
test_utils.set_all_pebbles_ready(self.harness)
|
||||||
|
self.assertEqual(len(self.harness.charm.seen_events), 2)
|
||||||
|
|
||||||
def test_httpbin_pebble_ready(self):
|
def test_all_relations(self):
|
||||||
# Expected plan after Pebble ready with default config
|
"""Test all the charms relations."""
|
||||||
expected_plan = {
|
ceph_rel_id = self.harness.add_relation("ceph", "ceph-mon")
|
||||||
"services": {
|
self.harness.begin_with_initial_hooks()
|
||||||
"httpbin": {
|
self.harness.add_relation_unit(ceph_rel_id, "ceph-mon/0")
|
||||||
"override": "replace",
|
self.harness.update_relation_data(
|
||||||
"summary": "httpbin",
|
ceph_rel_id, "ceph-mon/0", {"ingress-address": "10.0.0.33"}
|
||||||
"command": "gunicorn -b 0.0.0.0:80 httpbin:app -k gevent",
|
)
|
||||||
"startup": "enabled",
|
test_utils.add_ceph_relation_credentials(self.harness, ceph_rel_id)
|
||||||
"environment": {"GUNICORN_CMD_ARGS": "--log-level info"},
|
test_utils.add_db_relation_credentials(
|
||||||
}
|
self.harness, test_utils.add_base_db_relation(self.harness)
|
||||||
},
|
)
|
||||||
}
|
test_utils.add_identity_service_relation_response(
|
||||||
# Simulate the container coming up and emission of pebble-ready event
|
self.harness,
|
||||||
self.harness.container_pebble_ready("httpbin")
|
test_utils.add_base_identity_service_relation(self.harness),
|
||||||
# Get the plan now we've run PebbleReady
|
)
|
||||||
updated_plan = self.harness.get_container_pebble_plan("httpbin").to_dict()
|
|
||||||
# Check we've got the plan we expected
|
|
||||||
self.assertEqual(expected_plan, updated_plan)
|
|
||||||
# Check the service was started
|
|
||||||
service = self.harness.model.unit.get_container("httpbin").get_service("httpbin")
|
|
||||||
self.assertTrue(service.is_running())
|
|
||||||
# Ensure we set an ActiveStatus with no message
|
|
||||||
self.assertEqual(self.harness.model.unit.status, ops.ActiveStatus())
|
|
||||||
|
|
||||||
def test_config_changed_valid_can_connect(self):
|
self.harness.set_leader()
|
||||||
# Ensure the simulated Pebble API is reachable
|
test_utils.set_all_pebbles_ready(self.harness)
|
||||||
self.harness.set_can_connect("httpbin", True)
|
ceph_install_cmds = [
|
||||||
# Trigger a config-changed event with an updated value
|
[
|
||||||
self.harness.update_config({"log-level": "debug"})
|
"ceph-authtool",
|
||||||
# Get the plan now we've run PebbleReady
|
"/etc/ceph/ceph.client.gnocchi-k8s.keyring",
|
||||||
updated_plan = self.harness.get_container_pebble_plan("httpbin").to_dict()
|
"--create-keyring",
|
||||||
updated_env = updated_plan["services"]["httpbin"]["environment"]
|
"--name=client.gnocchi-k8s",
|
||||||
# Check the config change was effective
|
"--add-key=AQBUfpVeNl7CHxAA8/f6WTcYFxW2dJ5VyvWmJg==",
|
||||||
self.assertEqual(updated_env, {"GUNICORN_CMD_ARGS": "--log-level debug"})
|
],
|
||||||
self.assertEqual(self.harness.model.unit.status, ops.ActiveStatus())
|
[
|
||||||
|
"chown",
|
||||||
|
"gnocchi:gnocchi",
|
||||||
|
"/etc/ceph/ceph.client.gnocchi-k8s.keyring",
|
||||||
|
"/etc/ceph/rbdmap",
|
||||||
|
],
|
||||||
|
[
|
||||||
|
"chmod",
|
||||||
|
"640",
|
||||||
|
"/etc/ceph/ceph.client.gnocchi-k8s.keyring",
|
||||||
|
"/etc/ceph/rbdmap",
|
||||||
|
],
|
||||||
|
]
|
||||||
|
for cmd in ceph_install_cmds:
|
||||||
|
self.assertIn(cmd, self.container_calls.execute["gnocchi-api"])
|
||||||
|
|
||||||
def test_config_changed_valid_cannot_connect(self):
|
app_setup_cmds = [
|
||||||
# Trigger a config-changed event with an updated value
|
["a2dissite", "gnocchi-api"],
|
||||||
self.harness.update_config({"log-level": "debug"})
|
["a2ensite", "wsgi-gnocchi-api"],
|
||||||
# Check the charm is in WaitingStatus
|
["gnocchi-upgrade"],
|
||||||
self.assertIsInstance(self.harness.model.unit.status, ops.WaitingStatus)
|
]
|
||||||
|
for cmd in app_setup_cmds:
|
||||||
|
self.assertIn(cmd, self.container_calls.execute["gnocchi-api"])
|
||||||
|
|
||||||
def test_config_changed_invalid(self):
|
for f in [
|
||||||
# Ensure the simulated Pebble API is reachable
|
"/etc/gnocchi/gnocchi.conf",
|
||||||
self.harness.set_can_connect("httpbin", True)
|
"/etc/ceph/ceph.conf",
|
||||||
# Trigger a config-changed event with an updated value
|
]:
|
||||||
self.harness.update_config({"log-level": "foobar"})
|
self.check_file("gnocchi-api", f)
|
||||||
# Check the charm is in BlockedStatus
|
self.check_file("gnocchi-metricd", f)
|
||||||
self.assertIsInstance(self.harness.model.unit.status, ops.BlockedStatus)
|
|
||||||
|
211
tox.ini
211
tox.ini
@@ -1,77 +1,80 @@
|
|||||||
# Operator charm (with zaza): tox.ini
|
# Operator charm (with zaza): tox.ini
|
||||||
|
|
||||||
[tox]
|
[tox]
|
||||||
envlist = pep8,py3
|
|
||||||
skipsdist = True
|
skipsdist = True
|
||||||
# NOTE: Avoid build/test env pollution by not enabling sitepackages.
|
envlist = pep8,py3
|
||||||
sitepackages = False
|
sitepackages = False
|
||||||
# NOTE: Avoid false positives by not skipping missing interpreters.
|
|
||||||
skip_missing_interpreters = False
|
skip_missing_interpreters = False
|
||||||
# NOTES:
|
minversion = 3.18.0
|
||||||
# * We avoid the new dependency resolver by pinning pip < 20.3, see
|
|
||||||
# https://github.com/pypa/pip/issues/9187
|
[vars]
|
||||||
# * Pinning dependencies requires tox >= 3.2.0, see
|
src_path = {toxinidir}/src/
|
||||||
# https://tox.readthedocs.io/en/latest/config.html#conf-requires
|
tst_path = {toxinidir}/tests/
|
||||||
# * It is also necessary to pin virtualenv as a newer virtualenv would still
|
lib_path = {toxinidir}/lib/
|
||||||
# lead to fetching the latest pip in the func* tox targets, see
|
pyproject_toml = {toxinidir}/pyproject.toml
|
||||||
# https://stackoverflow.com/a/38133283
|
all_path = {[vars]src_path} {[vars]tst_path}
|
||||||
requires = pip < 20.3
|
|
||||||
virtualenv < 20.0
|
|
||||||
# NOTE: https://wiki.canonical.com/engineering/OpenStack/InstallLatestToxOnOsci
|
|
||||||
minversion = 3.2.0
|
|
||||||
|
|
||||||
[testenv]
|
[testenv]
|
||||||
setenv = VIRTUAL_ENV={envdir}
|
basepython = python3
|
||||||
PYTHONHASHSEED=0
|
setenv =
|
||||||
CHARM_DIR={envdir}
|
PYTHONPATH = {toxinidir}:{[vars]lib_path}:{[vars]src_path}
|
||||||
|
passenv =
|
||||||
|
HOME
|
||||||
|
PYTHONPATH
|
||||||
install_command =
|
install_command =
|
||||||
pip install {opts} {packages}
|
pip install {opts} {packages}
|
||||||
commands = stestr run --slowest {posargs}
|
commands = stestr run --slowest {posargs}
|
||||||
whitelist_externals =
|
allowlist_externals =
|
||||||
git
|
git
|
||||||
add-to-archive.py
|
charmcraft
|
||||||
bash
|
{toxinidir}/fetch-libs.sh
|
||||||
charmcraft
|
{toxinidir}/rename.sh
|
||||||
passenv = HOME TERM CS_* OS_* TEST_*
|
deps =
|
||||||
deps = -r{toxinidir}/test-requirements.txt
|
-r{toxinidir}/test-requirements.txt
|
||||||
|
|
||||||
[testenv:py35]
|
[testenv:fmt]
|
||||||
basepython = python3.5
|
description = Apply coding style standards to code
|
||||||
# python3.5 is irrelevant on a focal+ charm.
|
deps =
|
||||||
commands = /bin/true
|
black
|
||||||
|
isort
|
||||||
|
commands =
|
||||||
|
isort {[vars]all_path} --skip-glob {[vars]lib_path} --skip {toxinidir}/.tox
|
||||||
|
black --config {[vars]pyproject_toml} {[vars]all_path} --exclude {[vars]lib_path}
|
||||||
|
|
||||||
[testenv:py36]
|
[testenv:build]
|
||||||
basepython = python3.6
|
basepython = python3
|
||||||
deps = -r{toxinidir}/requirements.txt
|
deps =
|
||||||
-r{toxinidir}/test-requirements.txt
|
commands =
|
||||||
|
charmcraft -v pack
|
||||||
|
{toxinidir}/rename.sh
|
||||||
|
|
||||||
[testenv:py37]
|
[testenv:fetch]
|
||||||
basepython = python3.7
|
basepython = python3
|
||||||
deps = -r{toxinidir}/requirements.txt
|
deps =
|
||||||
-r{toxinidir}/test-requirements.txt
|
commands =
|
||||||
|
{toxinidir}/fetch-libs.sh
|
||||||
[testenv:py38]
|
|
||||||
basepython = python3.8
|
|
||||||
deps = -r{toxinidir}/requirements.txt
|
|
||||||
-r{toxinidir}/test-requirements.txt
|
|
||||||
|
|
||||||
[testenv:py3]
|
[testenv:py3]
|
||||||
basepython = python3
|
basepython = python3
|
||||||
deps = -r{toxinidir}/requirements.txt
|
deps =
|
||||||
-r{toxinidir}/test-requirements.txt
|
{[testenv]deps}
|
||||||
|
-r{toxinidir}/requirements.txt
|
||||||
|
|
||||||
[testenv:pep8]
|
[testenv:py38]
|
||||||
basepython = python3
|
basepython = python3.8
|
||||||
deps = -r{toxinidir}/requirements.txt
|
deps = {[testenv:py3]deps}
|
||||||
-r{toxinidir}/test-requirements.txt
|
|
||||||
commands = flake8 {posargs} src unit_tests tests
|
[testenv:py39]
|
||||||
|
basepython = python3.9
|
||||||
|
deps = {[testenv:py3]deps}
|
||||||
|
|
||||||
|
[testenv:py310]
|
||||||
|
basepython = python3.10
|
||||||
|
deps = {[testenv:py3]deps}
|
||||||
|
|
||||||
[testenv:cover]
|
[testenv:cover]
|
||||||
# Technique based heavily upon
|
|
||||||
# https://github.com/openstack/nova/blob/master/tox.ini
|
|
||||||
basepython = python3
|
basepython = python3
|
||||||
deps = -r{toxinidir}/requirements.txt
|
deps = {[testenv:py3]deps}
|
||||||
-r{toxinidir}/test-requirements.txt
|
|
||||||
setenv =
|
setenv =
|
||||||
{[testenv]setenv}
|
{[testenv]setenv}
|
||||||
PYTHON=coverage run
|
PYTHON=coverage run
|
||||||
@@ -83,6 +86,66 @@ commands =
|
|||||||
coverage xml -o cover/coverage.xml
|
coverage xml -o cover/coverage.xml
|
||||||
coverage report
|
coverage report
|
||||||
|
|
||||||
|
[testenv:pep8]
|
||||||
|
description = Alias for lint
|
||||||
|
deps = {[testenv:lint]deps}
|
||||||
|
commands = {[testenv:lint]commands}
|
||||||
|
|
||||||
|
[testenv:lint]
|
||||||
|
description = Check code against coding style standards
|
||||||
|
deps =
|
||||||
|
black
|
||||||
|
flake8<6 # Pin version until https://github.com/savoirfairelinux/flake8-copyright/issues/19 is merged
|
||||||
|
flake8-docstrings
|
||||||
|
flake8-copyright
|
||||||
|
flake8-builtins
|
||||||
|
pyproject-flake8
|
||||||
|
pep8-naming
|
||||||
|
isort
|
||||||
|
codespell
|
||||||
|
commands =
|
||||||
|
codespell {[vars]all_path}
|
||||||
|
# pflake8 wrapper supports config from pyproject.toml
|
||||||
|
pflake8 --exclude {[vars]lib_path} --config {toxinidir}/pyproject.toml {[vars]all_path}
|
||||||
|
isort --check-only --diff {[vars]all_path} --skip-glob {[vars]lib_path}
|
||||||
|
black --config {[vars]pyproject_toml} --check --diff {[vars]all_path} --exclude {[vars]lib_path}
|
||||||
|
|
||||||
|
[testenv:func-noop]
|
||||||
|
basepython = python3
|
||||||
|
deps =
|
||||||
|
git+https://github.com/openstack-charmers/zaza.git@libjuju-3.1#egg=zaza
|
||||||
|
git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack
|
||||||
|
git+https://opendev.org/openstack/tempest.git#egg=tempest
|
||||||
|
commands =
|
||||||
|
functest-run-suite --help
|
||||||
|
|
||||||
|
[testenv:func]
|
||||||
|
basepython = python3
|
||||||
|
deps = {[testenv:func-noop]deps}
|
||||||
|
commands =
|
||||||
|
functest-run-suite --keep-model
|
||||||
|
|
||||||
|
[testenv:func-smoke]
|
||||||
|
basepython = python3
|
||||||
|
deps = {[testenv:func-noop]deps}
|
||||||
|
setenv =
|
||||||
|
TEST_MODEL_SETTINGS = automatically-retry-hooks=true
|
||||||
|
TEST_MAX_RESOLVE_COUNT = 5
|
||||||
|
commands =
|
||||||
|
functest-run-suite --keep-model --smoke
|
||||||
|
|
||||||
|
[testenv:func-dev]
|
||||||
|
basepython = python3
|
||||||
|
deps = {[testenv:func-noop]deps}
|
||||||
|
commands =
|
||||||
|
functest-run-suite --keep-model --dev
|
||||||
|
|
||||||
|
[testenv:func-target]
|
||||||
|
basepython = python3
|
||||||
|
deps = {[testenv:func-noop]deps}
|
||||||
|
commands =
|
||||||
|
functest-run-suite --keep-model --bundle {posargs}
|
||||||
|
|
||||||
[coverage:run]
|
[coverage:run]
|
||||||
branch = True
|
branch = True
|
||||||
concurrency = multiprocessing
|
concurrency = multiprocessing
|
||||||
@@ -91,44 +154,8 @@ source =
|
|||||||
.
|
.
|
||||||
omit =
|
omit =
|
||||||
.tox/*
|
.tox/*
|
||||||
*/charmhelpers/*
|
tests/*
|
||||||
unit_tests/*
|
src/templates/*
|
||||||
|
|
||||||
[testenv:venv]
|
|
||||||
basepython = python3
|
|
||||||
commands = {posargs}
|
|
||||||
|
|
||||||
[testenv:build]
|
|
||||||
basepython = python3
|
|
||||||
deps = -r{toxinidir}/build-requirements.txt
|
|
||||||
commands =
|
|
||||||
charmcraft build
|
|
||||||
|
|
||||||
[testenv:func-noop]
|
|
||||||
basepython = python3
|
|
||||||
commands =
|
|
||||||
functest-run-suite --help
|
|
||||||
|
|
||||||
[testenv:func]
|
|
||||||
basepython = python3
|
|
||||||
commands =
|
|
||||||
functest-run-suite --keep-model
|
|
||||||
|
|
||||||
[testenv:func-smoke]
|
|
||||||
basepython = python3
|
|
||||||
commands =
|
|
||||||
functest-run-suite --keep-model --smoke
|
|
||||||
|
|
||||||
[testenv:func-dev]
|
|
||||||
basepython = python3
|
|
||||||
commands =
|
|
||||||
functest-run-suite --keep-model --dev
|
|
||||||
|
|
||||||
[testenv:func-target]
|
|
||||||
basepython = python3
|
|
||||||
commands =
|
|
||||||
functest-run-suite --keep-model --bundle {posargs}
|
|
||||||
|
|
||||||
[flake8]
|
[flake8]
|
||||||
# Ignore E902 because the unit_tests directory is missing in the built charm.
|
ignore=E226,W504
|
||||||
ignore = E402,E226,E902
|
|
||||||
|
Reference in New Issue
Block a user