diff --git a/.coveragerc b/.coveragerc deleted file mode 100644 index f466f9a5..00000000 --- a/.coveragerc +++ /dev/null @@ -1,8 +0,0 @@ -[run] -branch = True -source = aodh -omit = aodh/tests/* - -[report] -ignore_errors = True - diff --git a/.gitignore b/.gitignore deleted file mode 100644 index 8f7c95b1..00000000 --- a/.gitignore +++ /dev/null @@ -1,22 +0,0 @@ -*.egg* -*.mo -*.pyc -*~ -.*.swp -.*sw? -.coverage -.testrepository -.tox -AUTHORS -build/* -ChangeLog -cover/* -dist/* -doc/build -doc/source/_static/ -etc/aodh/aodh.conf -subunit.log - -# Files created by releasenotes build -releasenotes/build -/doc/source/contributor/api/ diff --git a/.gitreview b/.gitreview deleted file mode 100644 index 5f58c710..00000000 --- a/.gitreview +++ /dev/null @@ -1,4 +0,0 @@ -[gerrit] -host=review.openstack.org -port=29418 -project=openstack/aodh.git diff --git a/.mailmap b/.mailmap deleted file mode 100644 index d8094276..00000000 --- a/.mailmap +++ /dev/null @@ -1,33 +0,0 @@ -# Format is: -# -# -Adam Gandelman -Alan Pevec -Alexei Kornienko -ChangBo Guo(gcb) Chang Bo Guo -Chinmaya Bharadwaj chinmay -Clark Boylan -Doug Hellmann -Fei Long Wang -Fengqian Gao Fengqian -Fengqian Gao Fengqian.Gao -Gordon Chung gordon chung -Gordon Chung Gordon Chung -Gordon Chung gordon chung -Ildiko Vancsa Ildiko -John H. Tran John Tran -Julien Danjou -LiuSheng liu-sheng -Mehdi Abaakouk -Nejc Saje -Nejc Saje -Nicolas Barcet (nijaba) -Pádraig Brady -Rich Bowen -Sandy Walsh -Sascha Peilicke -Sean Dague -Shengjie Min shengjie-min -Shuangtai Tian shuangtai -Swann Croiset -ZhiQiang Fan diff --git a/.testr.conf b/.testr.conf deleted file mode 100644 index c242a982..00000000 --- a/.testr.conf +++ /dev/null @@ -1,9 +0,0 @@ -[DEFAULT] -test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \ - OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \ - OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-600} \ - ${PYTHON:-python} -m subunit.run discover ${OS_TEST_PATH:-./aodh/tests} -t . $LISTOPT $IDOPTION -test_id_option=--load-list $IDFILE -test_list_option=--list -# NOTE(chdent): Only used/matches on gabbi-related tests. -group_regex=(gabbi\.(suitemaker|driver)\.test_gabbi_([^_]+))_ diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst deleted file mode 100644 index 055d4fee..00000000 --- a/CONTRIBUTING.rst +++ /dev/null @@ -1,16 +0,0 @@ -If you would like to contribute to the development of OpenStack, -you must follow the steps documented at: - - https://docs.openstack.org/infra/manual/developers.html#development-workflow - -Once those steps have been completed, changes to OpenStack -should be submitted for review via the Gerrit tool, following -the workflow documented at: - - https://docs.openstack.org/infra/manual/developers.html#development-workflow - -Pull requests submitted through GitHub will be ignored. - -Bugs should be filed on Launchpad, not GitHub: - - https://bugs.launchpad.net/aodh diff --git a/HACKING.rst b/HACKING.rst deleted file mode 100644 index 310239e1..00000000 --- a/HACKING.rst +++ /dev/null @@ -1,9 +0,0 @@ -Aodh Style Commandments -======================= - -- Step 1: Read the OpenStack Style Commandments - https://docs.openstack.org/hacking/latest/ -- Step 2: Read on - -Aodh Specific Commandments --------------------------- diff --git a/LICENSE b/LICENSE deleted file mode 100644 index 68c771a0..00000000 --- a/LICENSE +++ /dev/null @@ -1,176 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. 
- - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - diff --git a/MAINTAINERS b/MAINTAINERS deleted file mode 100644 index d4683b55..00000000 --- a/MAINTAINERS +++ /dev/null @@ -1,15 +0,0 @@ -= Generalist Code Reviewers = - -The current members of aodh-core are listed here: - - https://launchpad.net/~aodh-drivers/+members#active - -This group can +2 and approve patches in aodh. However, they may -choose to seek feedback from the appropriate specialist maintainer before -approving a patch if it is in any way controversial or risky. - -= IRC handles of maintainers = -gordc -jd_ -llu -sileht diff --git a/README b/README new file mode 100644 index 00000000..8fcd2b2f --- /dev/null +++ b/README @@ -0,0 +1,14 @@ +This project is no longer maintained. + +The contents of this repository are still available in the Git +source code management system. To see the contents of this +repository before it reached its end of life, please check out the +previous commit with "git checkout HEAD^1". + +For ongoing work on maintaining OpenStack packages in the Debian +distribution, please see the Debian OpenStack packaging team at +https://wiki.debian.org/OpenStack/. + +For any further questions, please email +openstack-dev@lists.openstack.org or join #openstack-dev on +Freenode. 
diff --git a/README.rst b/README.rst deleted file mode 100644 index 5aa8d9ac..00000000 --- a/README.rst +++ /dev/null @@ -1,11 +0,0 @@ -aodh -==== - -Release notes can be read online at: - https://docs.openstack.org/aodh/latest/contributor/releasenotes/index.html - -Documentation for the project can be found at: - https://docs.openstack.org/aodh/latest/ - -The project home is at: - https://launchpad.net/aodh diff --git a/aodh/__init__.py b/aodh/__init__.py deleted file mode 100644 index 676c802f..00000000 --- a/aodh/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright 2014 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -class NotImplementedError(NotImplementedError): - # FIXME(jd) This is used by WSME to return a correct HTTP code. We should - # not expose it here but wrap our methods in the API to convert it to a - # proper HTTP error. - code = 501 diff --git a/aodh/api/__init__.py b/aodh/api/__init__.py deleted file mode 100644 index f2a76d59..00000000 --- a/aodh/api/__init__.py +++ /dev/null @@ -1,30 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import os - -from oslo_config import cfg - -# Register options for the service -OPTS = [ - cfg.StrOpt('paste_config', - default=os.path.abspath( - os.path.join( - os.path.dirname(__file__), "api-paste.ini")), - help="Configuration file for WSGI definition of API."), - cfg.StrOpt( - 'auth_mode', - default="keystone", - help="Authentication mode to use. 
Unset to disable authentication"), -] diff --git a/aodh/api/api-paste.ini b/aodh/api/api-paste.ini deleted file mode 100644 index bccbb26c..00000000 --- a/aodh/api/api-paste.ini +++ /dev/null @@ -1,47 +0,0 @@ -[composite:aodh+noauth] -use = egg:Paste#urlmap -/ = aodhversions_pipeline -/v2 = aodhv2_noauth_pipeline -/healthcheck = healthcheck - -[composite:aodh+keystone] -use = egg:Paste#urlmap -/ = aodhversions_pipeline -/v2 = aodhv2_keystone_pipeline -/healthcheck = healthcheck - -[app:healthcheck] -use = egg:oslo.middleware#healthcheck -oslo_config_project = aodh - -[pipeline:aodhversions_pipeline] -pipeline = cors http_proxy_to_wsgi aodhversions - -[app:aodhversions] -paste.app_factory = aodh.api.app:app_factory -root = aodh.api.controllers.root.VersionsController - -[pipeline:aodhv2_keystone_pipeline] -pipeline = cors http_proxy_to_wsgi request_id authtoken aodhv2 - -[pipeline:aodhv2_noauth_pipeline] -pipeline = cors http_proxy_to_wsgi request_id aodhv2 - -[app:aodhv2] -paste.app_factory = aodh.api.app:app_factory -root = aodh.api.controllers.v2.root.V2Controller - -[filter:authtoken] -paste.filter_factory = keystonemiddleware.auth_token:filter_factory -oslo_config_project = aodh - -[filter:request_id] -paste.filter_factory = oslo_middleware:RequestId.factory - -[filter:cors] -paste.filter_factory = oslo_middleware.cors:filter_factory -oslo_config_project = aodh - -[filter:http_proxy_to_wsgi] -paste.filter_factory = oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory -oslo_config_project = aodh diff --git a/aodh/api/app.py b/aodh/api/app.py deleted file mode 100644 index b7c09001..00000000 --- a/aodh/api/app.py +++ /dev/null @@ -1,87 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# Copyright 2015-2016 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os -import uuid - -from oslo_config import cfg -from oslo_log import log -from paste import deploy -import pecan - -from aodh.api import hooks -from aodh.api import middleware -from aodh import service -from aodh import storage - -LOG = log.getLogger(__name__) - - -# NOTE(sileht): pastedeploy uses ConfigParser to handle -# global_conf, since python 3 ConfigParser doesn't -# allow storing object as config value, only strings are -# permit, so to be able to pass an object created before paste load -# the app, we store them into a global var. But the each loaded app -# store it's configuration in unique key to be concurrency safe. 
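A minimal, standalone sketch of the pattern the note above describes (register, recover, and _REGISTRY are invented names, not part of aodh): paste's string-only global_conf carries just a uuid key, while the real configuration object waits in a module-level dict.

    import uuid

    _REGISTRY = {}

    def register(obj):
        # Store the non-string object; hand back a string key that can
        # safely travel through paste's string-only global_conf.
        key = str(uuid.uuid4())
        _REGISTRY[key] = obj
        return key

    def recover(global_conf):
        # The factory side gets the key back as a plain string and uses
        # it to look the original object up again.
        return _REGISTRY[global_conf['configkey']]

Giving each loaded app its own key is what makes the scheme concurrency-safe, as the note says.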
-global APPCONFIGS -APPCONFIGS = {} - - -def setup_app(root, conf): - app_hooks = [hooks.ConfigHook(conf), - hooks.DBHook( - storage.get_connection_from_config(conf)), - hooks.TranslationHook()] - return pecan.make_app( - root, - hooks=app_hooks, - wrap_app=middleware.ParsableErrorMiddleware, - guess_content_type_from_ext=False - ) - - -def load_app(conf): - global APPCONFIGS - - # Build the WSGI app - cfg_path = conf.api.paste_config - if not os.path.isabs(cfg_path): - cfg_path = conf.find_file(cfg_path) - - if cfg_path is None or not os.path.exists(cfg_path): - raise cfg.ConfigFilesNotFoundError([conf.api.paste_config]) - - config = dict(conf=conf) - configkey = str(uuid.uuid4()) - APPCONFIGS[configkey] = config - - LOG.info("WSGI config used: %s", cfg_path) - return deploy.loadapp("config:" + cfg_path, - name="aodh+" + ( - conf.api.auth_mode - if conf.api.auth_mode else "noauth" - ), - global_conf={'configkey': configkey}) - - -def app_factory(global_config, **local_conf): - global APPCONFIGS - appconfig = APPCONFIGS.get(global_config.get('configkey')) - return setup_app(root=local_conf.get('root'), **appconfig) - - -def build_wsgi_app(argv=None): - return load_app(service.prepare_service(argv=argv)) diff --git a/aodh/api/app.wsgi b/aodh/api/app.wsgi deleted file mode 100644 index fb8eb603..00000000 --- a/aodh/api/app.wsgi +++ /dev/null @@ -1,23 +0,0 @@ -# -*- mode: python -*- -# -# Copyright 2013 New Dream Network, LLC (DreamHost) -# Copyright 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Use this file for deploying the API under mod_wsgi. - -See http://pecan.readthedocs.org/en/latest/deployment.html for details. -""" -from aodh.api import app - -application = app.build_wsgi_app(argv=[]) diff --git a/aodh/api/controllers/__init__.py b/aodh/api/controllers/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/aodh/api/controllers/root.py b/aodh/api/controllers/root.py deleted file mode 100644 index b66d1770..00000000 --- a/aodh/api/controllers/root.py +++ /dev/null @@ -1,51 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import pecan - -MEDIA_TYPE_JSON = 'application/vnd.openstack.telemetry-%s+json' -MEDIA_TYPE_XML = 'application/vnd.openstack.telemetry-%s+xml' - - -class VersionsController(object): - - @pecan.expose('json') - def index(self): - base_url = pecan.request.host_url - available = [{'tag': 'v2', 'date': '2013-02-13T00:00:00Z', }] - collected = [version_descriptor(base_url, v['tag'], v['date']) - for v in available] - versions = {'versions': {'values': collected}} - return versions - - -def version_descriptor(base_url, version, released_on): - url = version_url(base_url, version) - return { - 'id': version, - 'links': [ - {'href': url, 'rel': 'self', }, - {'href': 'http://docs.openstack.org/', - 'rel': 'describedby', 'type': 'text/html', }], - 'media-types': [ - {'base': 'application/json', 'type': MEDIA_TYPE_JSON % version, }, - {'base': 'application/xml', 'type': MEDIA_TYPE_XML % version, }], - 'status': 'stable', - 'updated': released_on, - } - - -def version_url(base_url, version_number): - return '%s/%s' % (base_url, version_number) diff --git a/aodh/api/controllers/v2/__init__.py b/aodh/api/controllers/v2/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/aodh/api/controllers/v2/alarm_rules/__init__.py b/aodh/api/controllers/v2/alarm_rules/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/aodh/api/controllers/v2/alarm_rules/composite.py b/aodh/api/controllers/v2/alarm_rules/composite.py deleted file mode 100644 index 5062a899..00000000 --- a/aodh/api/controllers/v2/alarm_rules/composite.py +++ /dev/null @@ -1,119 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import json - -from stevedore import named -from wsme.rest import json as wjson -from wsme import types as wtypes - -from aodh.api.controllers.v2 import base -from aodh.i18n import _ - - -class InvalidCompositeRule(base.ClientSideError): - def __init__(self, error): - err = _('Invalid input composite rule: %s, it should ' - 'be a dict with an "and" or "or" as key, and the ' - 'value of dict should be a list of basic threshold ' - 'rules or sub composite rules, can be nested.') % error - super(InvalidCompositeRule, self).__init__(err) - - -class CompositeRule(wtypes.UserType): - """Composite alarm rule. - - A simple dict type to preset composite rule. 
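    An illustrative (not exhaustive) example of the accepted shape, per
    valid_composite_rule below: a dict with a single "and" or "or" key
    whose value is a list of basic threshold rules (each carrying a
    "type") or further nested composite rules, for example:

        {"or": [{"type": "gnocchi_resources_threshold", ...},
                {"and": [{"type": "threshold", ...},
                         {"type": "threshold", ...}]}]}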
- """ - - basetype = wtypes.text - name = 'composite_rule' - - threshold_plugins = None - - def __init__(self): - threshold_rules = ('threshold', - 'gnocchi_resources_threshold', - 'gnocchi_aggregation_by_metrics_threshold', - 'gnocchi_aggregation_by_resources_threshold') - CompositeRule.threshold_plugins = named.NamedExtensionManager( - "aodh.alarm.rule", threshold_rules) - super(CompositeRule, self).__init__() - - @staticmethod - def valid_composite_rule(rules): - if isinstance(rules, dict) and len(rules) == 1: - and_or_key = list(rules)[0] - if and_or_key not in ('and', 'or'): - raise base.ClientSideError( - _('Threshold rules should be combined with "and" or "or"')) - if isinstance(rules[and_or_key], list): - for sub_rule in rules[and_or_key]: - CompositeRule.valid_composite_rule(sub_rule) - else: - raise InvalidCompositeRule(rules) - elif isinstance(rules, dict): - rule_type = rules.pop('type', None) - if not rule_type: - raise base.ClientSideError(_('type must be set in every rule')) - - if rule_type not in CompositeRule.threshold_plugins: - plugins = sorted(CompositeRule.threshold_plugins.names()) - err = _('Unsupported sub-rule type :%(rule)s in composite ' - 'rule, should be one of: %(plugins)s') % { - 'rule': rule_type, - 'plugins': plugins} - raise base.ClientSideError(err) - plugin = CompositeRule.threshold_plugins[rule_type].plugin - wjson.fromjson(plugin, rules) - rule_dict = plugin(**rules).as_dict() - rules.update(rule_dict) - rules.update(type=rule_type) - else: - raise InvalidCompositeRule(rules) - - @staticmethod - def validate(value): - try: - json.dumps(value) - except TypeError: - raise base.ClientSideError(_('%s is not JSON serializable') - % value) - else: - CompositeRule.valid_composite_rule(value) - return value - - @staticmethod - def frombasetype(value): - return CompositeRule.validate(value) - - @staticmethod - def create_hook(alarm): - pass - - @staticmethod - def validate_alarm(alarm): - pass - - @staticmethod - def update_hook(alarm): - pass - - @staticmethod - def as_dict(): - pass - - @staticmethod - def __call__(**rule): - return rule - -composite_rule = CompositeRule() diff --git a/aodh/api/controllers/v2/alarm_rules/event.py b/aodh/api/controllers/v2/alarm_rules/event.py deleted file mode 100644 index 60d0fbe8..00000000 --- a/aodh/api/controllers/v2/alarm_rules/event.py +++ /dev/null @@ -1,61 +0,0 @@ -# -# Copyright 2015 NEC Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import wsme -from wsme import types as wtypes - -from aodh.api.controllers.v2 import base -from aodh.i18n import _ - - -class AlarmEventRule(base.AlarmRule): - """Alarm Event Rule. 
- - Describe when to trigger the alarm based on an event - """ - - event_type = wsme.wsattr(wtypes.text) - "The type of event (default is '*')" - - query = wsme.wsattr([base.Query]) - "The query to find the event (default is [])" - - def __init__(self, event_type=None, query=None): - event_type = event_type or '*' - query = [base.Query(**q) for q in query or []] - super(AlarmEventRule, self).__init__(event_type=event_type, - query=query) - - @classmethod - def validate_alarm(cls, alarm): - for i in alarm.event_rule.query: - i._get_value_as_type() - - @property - def default_description(self): - return _('Alarm when %s event occurred.') % self.event_type - - def as_dict(self): - rule = self.as_dict_from_keys(['event_type']) - rule['query'] = [q.as_dict() for q in self.query] - return rule - - @classmethod - def sample(cls): - return cls(event_type='compute.instance.update', - query=[{'field': 'traits.instance_id"', - 'value': '153462d0-a9b8-4b5b-8175-9e4b05e9b856', - 'op': 'eq', - 'type': 'string'}]) diff --git a/aodh/api/controllers/v2/alarm_rules/gnocchi.py b/aodh/api/controllers/v2/alarm_rules/gnocchi.py deleted file mode 100644 index 25663d2a..00000000 --- a/aodh/api/controllers/v2/alarm_rules/gnocchi.py +++ /dev/null @@ -1,215 +0,0 @@ -# -# Copyright 2015 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import threading - -import cachetools -from gnocchiclient import client -from gnocchiclient import exceptions -from keystoneauth1 import exceptions as ka_exceptions -from oslo_config import cfg -from oslo_serialization import jsonutils -import pecan -import wsme -from wsme import types as wtypes - -from aodh.api.controllers.v2 import base -from aodh.api.controllers.v2 import utils as v2_utils -from aodh import keystone_client - - -GNOCCHI_OPTS = [ - cfg.StrOpt('gnocchi_external_project_owner', - default="service", - help='Project name of resources creator in Gnocchi. 
' - '(For example the Ceilometer project name'), -] - - -class GnocchiUnavailable(Exception): - code = 503 - - -class AlarmGnocchiThresholdRule(base.AlarmRule): - comparison_operator = base.AdvEnum('comparison_operator', str, - 'lt', 'le', 'eq', 'ne', 'ge', 'gt', - default='eq') - "The comparison against the alarm threshold" - - threshold = wsme.wsattr(float, mandatory=True) - "The threshold of the alarm" - - aggregation_method = wsme.wsattr(wtypes.text, mandatory=True) - "The aggregation_method to compare to the threshold" - - evaluation_periods = wsme.wsattr(wtypes.IntegerType(minimum=1), default=1) - "The number of historical periods to evaluate the threshold" - - granularity = wsme.wsattr(wtypes.IntegerType(minimum=1), default=60) - "The time range in seconds over which query" - - cache = cachetools.TTLCache(maxsize=1, ttl=3600) - lock = threading.RLock() - - @classmethod - def validate_alarm(cls, alarm): - alarm_rule = getattr(alarm, "%s_rule" % alarm.type) - aggregation_method = alarm_rule.aggregation_method - if aggregation_method not in cls._get_aggregation_methods(): - raise base.ClientSideError( - 'aggregation_method should be in %s not %s' % ( - cls._get_aggregation_methods(), aggregation_method)) - - @staticmethod - @cachetools.cached(cache, lock=lock) - def _get_aggregation_methods(): - conf = pecan.request.cfg - gnocchi_client = client.Client( - '1', keystone_client.get_session(conf), - interface=conf.service_credentials.interface, - region_name=conf.service_credentials.region_name) - - try: - return gnocchi_client.capabilities.list().get( - 'aggregation_methods', []) - except exceptions.ClientException as e: - raise base.ClientSideError(e.message, status_code=e.code) - except Exception as e: - raise GnocchiUnavailable(e) - - -class MetricOfResourceRule(AlarmGnocchiThresholdRule): - metric = wsme.wsattr(wtypes.text, mandatory=True) - "The name of the metric" - - resource_id = wsme.wsattr(wtypes.text, mandatory=True) - "The id of a resource" - - resource_type = wsme.wsattr(wtypes.text, mandatory=True) - "The resource type" - - def as_dict(self): - rule = self.as_dict_from_keys(['granularity', 'comparison_operator', - 'threshold', 'aggregation_method', - 'evaluation_periods', - 'metric', - 'resource_id', - 'resource_type']) - return rule - - -class AggregationMetricByResourcesLookupRule(AlarmGnocchiThresholdRule): - metric = wsme.wsattr(wtypes.text, mandatory=True) - "The name of the metric" - - query = wsme.wsattr(wtypes.text, mandatory=True) - ('The query to filter the metric, Don\'t forget to filter out ' - 'deleted resources (example: {"and": [{"=": {"ended_at": null}}, ...]}), ' - 'Otherwise Gnocchi will try to create the aggregate against obsolete ' - 'resources') - - resource_type = wsme.wsattr(wtypes.text, mandatory=True) - "The resource type" - - def as_dict(self): - rule = self.as_dict_from_keys(['granularity', 'comparison_operator', - 'threshold', 'aggregation_method', - 'evaluation_periods', - 'metric', - 'query', - 'resource_type']) - return rule - - cache = cachetools.TTLCache(maxsize=1, ttl=3600) - lock = threading.RLock() - - @staticmethod - @cachetools.cached(cache, lock=lock) - def get_external_project_owner(): - kc = keystone_client.get_client(pecan.request.cfg) - project_name = pecan.request.cfg.api.gnocchi_external_project_owner - try: - project = kc.projects.find(name=project_name) - return project.id - except ka_exceptions.NotFound: - return None - - @classmethod - def validate_alarm(cls, alarm): - super(AggregationMetricByResourcesLookupRule, - 
cls).validate_alarm(alarm) - - rule = alarm.gnocchi_aggregation_by_resources_threshold_rule - - # check the query string is a valid json - try: - query = jsonutils.loads(rule.query) - except ValueError: - raise wsme.exc.InvalidInput('rule/query', rule.query) - - conf = pecan.request.cfg - - # Scope the alarm to the project id if needed - auth_project = v2_utils.get_auth_project(alarm.project_id) - if auth_project: - - perms_filter = {"=": {"created_by_project_id": auth_project}} - - external_project_owner = cls.get_external_project_owner() - if external_project_owner: - perms_filter = {"or": [ - perms_filter, - {"and": [ - {"=": {"created_by_project_id": - external_project_owner}}, - {"=": {"project_id": auth_project}}]} - ]} - - query = {"and": [perms_filter, query]} - rule.query = jsonutils.dumps(query) - - gnocchi_client = client.Client( - '1', keystone_client.get_session(conf), - interface=conf.service_credentials.interface, - region_name=conf.service_credentials.region_name) - - try: - gnocchi_client.metric.aggregation( - metrics=rule.metric, - query=query, - aggregation=rule.aggregation_method, - needed_overlap=0, - resource_type=rule.resource_type) - except exceptions.ClientException as e: - if e.code == 404: - # NOTE(sileht): We are fine here, we just want to ensure the - # 'query' payload is valid for Gnocchi If the metric - # doesn't exists yet, it doesn't matter - return - raise base.ClientSideError(e.message, status_code=e.code) - except Exception as e: - raise GnocchiUnavailable(e) - - -class AggregationMetricsByIdLookupRule(AlarmGnocchiThresholdRule): - metrics = wsme.wsattr([wtypes.text], mandatory=True) - "A list of metric Ids" - - def as_dict(self): - rule = self.as_dict_from_keys(['granularity', 'comparison_operator', - 'threshold', 'aggregation_method', - 'evaluation_periods', - 'metrics']) - return rule diff --git a/aodh/api/controllers/v2/alarm_rules/threshold.py b/aodh/api/controllers/v2/alarm_rules/threshold.py deleted file mode 100644 index 5c30d28e..00000000 --- a/aodh/api/controllers/v2/alarm_rules/threshold.py +++ /dev/null @@ -1,161 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from ceilometerclient import client as ceiloclient -from ceilometerclient import exc as ceiloexc -import pecan -import wsme -from wsme import types as wtypes - -from aodh.api.controllers.v2 import base -from aodh.api.controllers.v2 import utils as v2_utils -from aodh.i18n import _ -from aodh import keystone_client -from aodh import storage - - -class AlarmThresholdRule(base.AlarmRule): - """Alarm Threshold Rule - - Describe when to trigger the alarm based on computed statistics - """ - - meter_name = wsme.wsattr(wtypes.text, mandatory=True) - "The name of the meter" - - # FIXME(sileht): default doesn't work - # workaround: default is set in validate method - query = wsme.wsattr([base.Query], default=[]) - """The query to find the data for computing statistics. - Ownership settings are automatically included based on the Alarm owner. 
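    An illustrative query, in the shape base.Query expects (the same
    resource id appears in sample() below):

        [{'field': 'resource_id', 'op': 'eq', 'type': 'string',
          'value': '2a4d689b-f0b8-49c1-9eef-87cae58d80db'}]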
- """ - - period = wsme.wsattr(wtypes.IntegerType(minimum=1), default=60) - "The time range in seconds over which query" - - comparison_operator = base.AdvEnum('comparison_operator', str, - 'lt', 'le', 'eq', 'ne', 'ge', 'gt', - default='eq') - "The comparison against the alarm threshold" - - threshold = wsme.wsattr(float, mandatory=True) - "The threshold of the alarm" - - statistic = base.AdvEnum('statistic', str, 'max', 'min', 'avg', 'sum', - 'count', default='avg') - "The statistic to compare to the threshold" - - evaluation_periods = wsme.wsattr(wtypes.IntegerType(minimum=1), default=1) - "The number of historical periods to evaluate the threshold" - - exclude_outliers = wsme.wsattr(bool, default=False) - "Whether datapoints with anomalously low sample counts are excluded" - - ceilometer_sample_api_is_supported = None - - def __init__(self, query=None, **kwargs): - query = [base.Query(**q) for q in query] if query else [] - super(AlarmThresholdRule, self).__init__(query=query, **kwargs) - - @classmethod - def _check_ceilometer_sample_api(cls): - # Check it only once - if cls.ceilometer_sample_api_is_supported is None: - - auth_config = pecan.request.cfg.service_credentials - client = ceiloclient.get_client( - version=2, - session=keystone_client.get_session(pecan.request.cfg), - # ceiloclient adapter options - region_name=auth_config.region_name, - interface=auth_config.interface, - ) - try: - client.statistics.list( - meter_name="idontthinkthatexistsbutwhatever") - except Exception as e: - if isinstance(e, ceiloexc.HTTPException): - if e.code == 410: - cls.ceilometer_sample_api_is_supported = False - elif e.code < 500: - cls.ceilometer_sample_api_is_supported = True - else: - raise - else: - raise - else: - # I don't think this meter can exist but how known - cls.ceilometer_sample_api_is_supported = True - - if cls.ceilometer_sample_api_is_supported is False: - raise base.ClientSideError( - "This telemetry installation is not configured to support" - "alarm of type 'threshold") - - @staticmethod - def validate(threshold_rule): - # note(sileht): wsme default doesn't work in some case - # workaround for https://bugs.launchpad.net/wsme/+bug/1227039 - if not threshold_rule.query: - threshold_rule.query = [] - - # Timestamp is not allowed for AlarmThresholdRule query, as the alarm - # evaluator will construct timestamp bounds for the sequence of - # statistics queries as the sliding evaluation window advances - # over time. 
- v2_utils.validate_query(threshold_rule.query, - storage.SampleFilter.__init__, - allow_timestamps=False) - return threshold_rule - - @classmethod - def validate_alarm(cls, alarm): - cls._check_ceilometer_sample_api() - # ensure an implicit constraint on project_id is added to - # the query if not already present - alarm.threshold_rule.query = v2_utils.sanitize_query( - alarm.threshold_rule.query, - storage.SampleFilter.__init__, - on_behalf_of=alarm.project_id - ) - - @property - def default_description(self): - return (_('Alarm when %(meter_name)s is %(comparison_operator)s a ' - '%(statistic)s of %(threshold)s over %(period)s seconds') % - dict(comparison_operator=self.comparison_operator, - statistic=self.statistic, - threshold=self.threshold, - meter_name=self.meter_name, - period=self.period)) - - def as_dict(self): - rule = self.as_dict_from_keys(['period', 'comparison_operator', - 'threshold', 'statistic', - 'evaluation_periods', 'meter_name', - 'exclude_outliers']) - rule['query'] = [q.as_dict() for q in self.query] - return rule - - @classmethod - def sample(cls): - return cls(meter_name='cpu_util', - period=60, - evaluation_periods=1, - threshold=300.0, - statistic='avg', - comparison_operator='gt', - query=[{'field': 'resource_id', - 'value': '2a4d689b-f0b8-49c1-9eef-87cae58d80db', - 'op': 'eq', - 'type': 'string'}]) diff --git a/aodh/api/controllers/v2/alarms.py b/aodh/api/controllers/v2/alarms.py deleted file mode 100644 index 823edbf1..00000000 --- a/aodh/api/controllers/v2/alarms.py +++ /dev/null @@ -1,841 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# Copyright 2013 IBM Corp. -# Copyright 2013 eNovance -# Copyright Ericsson AB 2013. All rights reserved -# Copyright 2014 Hewlett-Packard Company -# Copyright 2015 Huawei Technologies Co., Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime -import itertools -import json -import warnings - -import croniter -import debtcollector -from oslo_config import cfg -from oslo_log import log -from oslo_utils import netutils -from oslo_utils import timeutils -from oslo_utils import uuidutils -import pecan -from pecan import rest -import pytz -import six -from six.moves.urllib import parse as urlparse -from stevedore import extension -import wsme -from wsme import types as wtypes -import wsmeext.pecan as wsme_pecan - -import aodh -from aodh.api.controllers.v2 import base -from aodh.api.controllers.v2 import utils as v2_utils -from aodh.api import rbac -from aodh.i18n import _ -from aodh import keystone_client -from aodh import messaging -from aodh import notifier -from aodh.storage import models - -LOG = log.getLogger(__name__) - - -ALARM_API_OPTS = [ - cfg.IntOpt('user_alarm_quota', - deprecated_group='DEFAULT', - help='Maximum number of alarms defined for a user.' - ), - cfg.IntOpt('project_alarm_quota', - deprecated_group='DEFAULT', - help='Maximum number of alarms defined for a project.' 
- ), - cfg.IntOpt('alarm_max_actions', - default=-1, - deprecated_group='DEFAULT', - help='Maximum count of actions for each state of an alarm, ' - 'non-positive number means no limit.'), -] - -state_kind = ["ok", "alarm", "insufficient data"] -state_kind_enum = wtypes.Enum(str, *state_kind) -severity_kind = ["low", "moderate", "critical"] -severity_kind_enum = wtypes.Enum(str, *severity_kind) - -ALARM_REASON_DEFAULT = "Not evaluated yet" -ALARM_REASON_MANUAL = "Manually set via API" - - -class OverQuota(base.ClientSideError): - def __init__(self, data): - d = { - 'u': data.user_id, - 'p': data.project_id - } - super(OverQuota, self).__init__( - _("Alarm quota exceeded for user %(u)s on project %(p)s") % d, - status_code=403) - - -def is_over_quota(conn, project_id, user_id): - """Returns False if an alarm is within the set quotas, True otherwise. - - :param conn: a backend connection object - :param project_id: the ID of the project setting the alarm - :param user_id: the ID of the user setting the alarm - """ - - over_quota = False - - # Start by checking for user quota - user_alarm_quota = pecan.request.cfg.api.user_alarm_quota - if user_alarm_quota is not None: - user_alarms = list(conn.get_alarms(user=user_id)) - over_quota = len(user_alarms) >= user_alarm_quota - - # If the user quota isn't reached, we check for the project quota - if not over_quota: - project_alarm_quota = pecan.request.cfg.api.project_alarm_quota - if project_alarm_quota is not None: - project_alarms = list(conn.get_alarms(project=project_id)) - over_quota = len(project_alarms) >= project_alarm_quota - - return over_quota - - -class CronType(wtypes.UserType): - """A user type that represents a cron format.""" - basetype = six.string_types - name = 'cron' - - @staticmethod - def validate(value): - # raises ValueError if invalid - croniter.croniter(value) - return value - - -class AlarmTimeConstraint(base.Base): - """Representation of a time constraint on an alarm.""" - - name = wsme.wsattr(wtypes.text, mandatory=True) - "The name of the constraint" - - _description = None # provide a default - - def get_description(self): - if not self._description: - return ('Time constraint at %s lasting for %s seconds' - % (self.start, self.duration)) - return self._description - - def set_description(self, value): - self._description = value - - description = wsme.wsproperty(wtypes.text, get_description, - set_description) - "The description of the constraint" - - start = wsme.wsattr(CronType(), mandatory=True) - "Start point of the time constraint, in cron format" - - duration = wsme.wsattr(wtypes.IntegerType(minimum=0), mandatory=True) - "How long the constraint should last, in seconds" - - timezone = wsme.wsattr(wtypes.text, default="") - "Timezone of the constraint" - - def as_dict(self): - return self.as_dict_from_keys(['name', 'description', 'start', - 'duration', 'timezone']) - - @staticmethod - def validate(tc): - if tc.timezone: - try: - pytz.timezone(tc.timezone) - except Exception: - raise base.ClientSideError(_("Timezone %s is not valid") - % tc.timezone) - return tc - - @classmethod - def sample(cls): - return cls(name='SampleConstraint', - description='nightly build every night at 23h for 3 hours', - start='0 23 * * *', - duration=10800, - timezone='Europe/Ljubljana') - - -ALARMS_RULES = extension.ExtensionManager("aodh.alarm.rule") -LOG.debug("alarm rules plugin loaded: %s" % ",".join(ALARMS_RULES.names())) - -ACTIONS_SCHEMA = extension.ExtensionManager( - 
notifier.AlarmNotifierService.NOTIFIER_EXTENSIONS_NAMESPACE).names() - - -class Alarm(base.Base): - """Representation of an alarm.""" - - alarm_id = wtypes.text - "The UUID of the alarm" - - name = wsme.wsattr(wtypes.text, mandatory=True) - "The name for the alarm" - - _description = None # provide a default - - def get_description(self): - rule = getattr(self, '%s_rule' % self.type, None) - if not self._description: - if hasattr(rule, 'default_description'): - return six.text_type(rule.default_description) - return "%s alarm rule" % self.type - return self._description - - def set_description(self, value): - self._description = value - - description = wsme.wsproperty(wtypes.text, get_description, - set_description) - "The description of the alarm" - - enabled = wsme.wsattr(bool, default=True) - "This alarm is enabled?" - - ok_actions = wsme.wsattr([wtypes.text], default=[]) - "The actions to do when alarm state change to ok" - - alarm_actions = wsme.wsattr([wtypes.text], default=[]) - "The actions to do when alarm state change to alarm" - - insufficient_data_actions = wsme.wsattr([wtypes.text], default=[]) - "The actions to do when alarm state change to insufficient data" - - repeat_actions = wsme.wsattr(bool, default=False) - "The actions should be re-triggered on each evaluation cycle" - - type = base.AdvEnum('type', str, *ALARMS_RULES.names(), - mandatory=True) - "Explicit type specifier to select which rule to follow below." - - time_constraints = wtypes.wsattr([AlarmTimeConstraint], default=[]) - """Describe time constraints for the alarm""" - - # These settings are ignored in the PUT or POST operations, but are - # filled in for GET - project_id = wtypes.text - "The ID of the project or tenant that owns the alarm" - - user_id = wtypes.text - "The ID of the user who created the alarm" - - timestamp = datetime.datetime - "The date of the last alarm definition update" - - state = base.AdvEnum('state', str, *state_kind, - default='insufficient data') - "The state offset the alarm" - - state_timestamp = datetime.datetime - "The date of the last alarm state changed" - - state_reason = wsme.wsattr(wtypes.text, default=ALARM_REASON_DEFAULT) - "The reason of the current state" - - severity = base.AdvEnum('severity', str, *severity_kind, - default='low') - "The severity of the alarm" - - def __init__(self, rule=None, time_constraints=None, **kwargs): - super(Alarm, self).__init__(**kwargs) - - if rule: - setattr(self, '%s_rule' % self.type, - ALARMS_RULES[self.type].plugin(**rule)) - - if time_constraints: - self.time_constraints = [AlarmTimeConstraint(**tc) - for tc in time_constraints] - - @staticmethod - def validate(alarm): - if alarm.type == 'threshold': - warnings.simplefilter("always") - debtcollector.deprecate( - "Ceilometer's API is deprecated as of Ocata. 
Therefore, " - " threshold rule alarms are no longer supported.", - version="5.0.0") - - Alarm.check_rule(alarm) - Alarm.check_alarm_actions(alarm) - - ALARMS_RULES[alarm.type].plugin.validate_alarm(alarm) - - if alarm.time_constraints: - tc_names = [tc.name for tc in alarm.time_constraints] - if len(tc_names) > len(set(tc_names)): - error = _("Time constraint names must be " - "unique for a given alarm.") - raise base.ClientSideError(error) - - return alarm - - @staticmethod - def check_rule(alarm): - rule = '%s_rule' % alarm.type - if getattr(alarm, rule) in (wtypes.Unset, None): - error = _("%(rule)s must be set for %(type)s" - " type alarm") % {"rule": rule, "type": alarm.type} - raise base.ClientSideError(error) - - rule_set = None - for ext in ALARMS_RULES: - name = "%s_rule" % ext.name - if getattr(alarm, name): - if rule_set is None: - rule_set = name - else: - error = _("%(rule1)s and %(rule2)s cannot be set at the " - "same time") % {'rule1': rule_set, 'rule2': name} - raise base.ClientSideError(error) - - @staticmethod - def check_alarm_actions(alarm): - max_actions = pecan.request.cfg.api.alarm_max_actions - for state in state_kind: - actions_name = state.replace(" ", "_") + '_actions' - actions = getattr(alarm, actions_name) - if not actions: - continue - - action_set = set(actions) - if len(actions) != len(action_set): - LOG.info('duplicate actions are found: %s, ' - 'remove duplicate ones', actions) - actions = list(action_set) - setattr(alarm, actions_name, actions) - - if 0 < max_actions < len(actions): - error = _('%(name)s count exceeds maximum value ' - '%(maximum)d') % {"name": actions_name, - "maximum": max_actions} - raise base.ClientSideError(error) - - limited = rbac.get_limited_to_project(pecan.request.headers, - pecan.request.enforcer) - - for action in actions: - try: - url = netutils.urlsplit(action) - except Exception: - error = _("Unable to parse action %s") % action - raise base.ClientSideError(error) - if url.scheme not in ACTIONS_SCHEMA: - error = _("Unsupported action %s") % action - raise base.ClientSideError(error) - if limited and url.scheme in ('log', 'test'): - error = _('You are not authorized to create ' - 'action: %s') % action - raise base.ClientSideError(error, status_code=401) - - @classmethod - def sample(cls): - return cls(alarm_id=None, - name="SwiftObjectAlarm", - description="An alarm", - type='threshold', - time_constraints=[AlarmTimeConstraint.sample().as_dict()], - user_id="c96c887c216949acbdfbd8b494863567", - project_id="c96c887c216949acbdfbd8b494863567", - enabled=True, - timestamp=datetime.datetime(2015, 1, 1, 12, 0, 0, 0), - state="ok", - severity="moderate", - state_reason="threshold over 90%", - state_timestamp=datetime.datetime(2015, 1, 1, 12, 0, 0, 0), - ok_actions=["http://site:8000/ok"], - alarm_actions=["http://site:8000/alarm"], - insufficient_data_actions=["http://site:8000/nodata"], - repeat_actions=False, - ) - - def as_dict(self, db_model): - d = super(Alarm, self).as_dict(db_model) - for k in d: - if k.endswith('_rule'): - del d[k] - rule = getattr(self, "%s_rule" % self.type) - d['rule'] = rule if isinstance(rule, dict) else rule.as_dict() - if self.time_constraints: - d['time_constraints'] = [tc.as_dict() - for tc in self.time_constraints] - return d - - @staticmethod - def _is_trust_url(url): - return url.scheme.startswith('trust+') - - def _get_existing_trust_ids(self): - for action in itertools.chain(self.ok_actions or [], - self.alarm_actions or [], - self.insufficient_data_actions or []): - url = 
netutils.urlsplit(action) - if self._is_trust_url(url): - trust_id = url.username - if trust_id and url.password == 'delete': - yield trust_id - - def update_actions(self, old_alarm=None): - trustor_user_id = pecan.request.headers.get('X-User-Id') - trustor_project_id = pecan.request.headers.get('X-Project-Id') - roles = pecan.request.headers.get('X-Roles', '') - if roles: - roles = roles.split(',') - else: - roles = [] - auth_plugin = pecan.request.environ.get('keystone.token_auth') - - if old_alarm: - prev_trust_ids = set(old_alarm._get_existing_trust_ids()) - else: - prev_trust_ids = set() - trust_id = prev_trust_ids.pop() if prev_trust_ids else None - trust_id_used = False - - for actions in (self.ok_actions, self.alarm_actions, - self.insufficient_data_actions): - if actions is not None: - for index, action in enumerate(actions[:]): - url = netutils.urlsplit(action) - if self._is_trust_url(url): - if '@' in url.netloc: - continue - if trust_id is None: - # We have a trust action without a trust ID, - # create it - trust_id = keystone_client.create_trust_id( - pecan.request.cfg, - trustor_user_id, trustor_project_id, roles, - auth_plugin) - if trust_id_used: - pw = '' - else: - pw = ':delete' - trust_id_used = True - netloc = '%s%s@%s' % (trust_id, pw, url.netloc) - url = urlparse.SplitResult(url.scheme, netloc, - url.path, url.query, - url.fragment) - actions[index] = url.geturl() - if trust_id is not None and not trust_id_used: - prev_trust_ids.add(trust_id) - for old_trust_id in prev_trust_ids: - keystone_client.delete_trust_id(old_trust_id, auth_plugin) - - def delete_actions(self): - auth_plugin = pecan.request.environ.get('keystone.token_auth') - for trust_id in self._get_existing_trust_ids(): - keystone_client.delete_trust_id(trust_id, auth_plugin) - - -Alarm.add_attributes(**{"%s_rule" % ext.name: ext.plugin - for ext in ALARMS_RULES}) - - -class AlarmChange(base.Base): - """Representation of an event in an alarm's history.""" - - event_id = wtypes.text - "The UUID of the change event" - - alarm_id = wtypes.text - "The UUID of the alarm" - - type = wtypes.Enum(str, - 'creation', - 'rule change', - 'state transition', - 'deletion') - "The type of change" - - detail = wtypes.text - "JSON fragment describing change" - - project_id = wtypes.text - "The project ID of the initiating identity" - - user_id = wtypes.text - "The user ID of the initiating identity" - - on_behalf_of = wtypes.text - "The tenant on behalf of which the change is being made" - - timestamp = datetime.datetime - "The time/date of the alarm change" - - @classmethod - def sample(cls): - return cls(alarm_id='e8ff32f772a44a478182c3fe1f7cad6a', - type='rule change', - detail='{"threshold": 42.0, "evaluation_periods": 4}', - user_id="3e5d11fda79448ac99ccefb20be187ca", - project_id="b6f16144010811e387e4de429e99ee8c", - on_behalf_of="92159030020611e3b26dde429e99ee8c", - timestamp=datetime.datetime(2015, 1, 1, 12, 0, 0, 0), - ) - - -def _send_notification(event, payload): - notification = event.replace(" ", "_") - notification = "alarm.%s" % notification - transport = messaging.get_transport(pecan.request.cfg) - notifier = messaging.get_notifier(transport, publisher_id="aodh.api") - # FIXME(sileht): perhaps we need to copy some infos from the - # pecan request headers like nova does - notifier.info({}, notification, payload) - - -def stringify_timestamps(data): - """Stringify any datetimes in given dict.""" - return dict((k, v.isoformat() - if isinstance(v, datetime.datetime) else v) - for (k, v) in six.iteritems(data)) 
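A sketch of how a trust action URL decomposes under netutils.urlsplit, which is what _is_trust_url and _get_existing_trust_ids above rely on (the host and trust ID here are invented for illustration):

    from oslo_utils import netutils

    url = netutils.urlsplit('trust+http://abc123:delete@host:8000/alarm')
    url.scheme    # 'trust+http' -> _is_trust_url() returns True
    url.username  # 'abc123'     -> taken as the trust ID
    url.password  # 'delete'     -> marks the trust for deletion along
                  #                 with the alarm

update_actions below builds the same form in reverse, prepending 'trust_id:delete@' to the netloc of the first trust action it rewrites.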
- - -class AlarmController(rest.RestController): - """Manages operations on a single alarm.""" - - _custom_actions = { - 'history': ['GET'], - 'state': ['PUT', 'GET'], - } - - def __init__(self, alarm_id): - pecan.request.context['alarm_id'] = alarm_id - self._id = alarm_id - - def _enforce_rbac(self, rbac_directive): - # TODO(sileht): We should be able to relax this since we - # pass the alarm object to the enforcer. - auth_project = rbac.get_limited_to_project(pecan.request.headers, - pecan.request.enforcer) - alarms = list(pecan.request.storage.get_alarms(alarm_id=self._id, - project=auth_project)) - if not alarms: - raise base.AlarmNotFound(alarm=self._id, auth_project=auth_project) - alarm = alarms[0] - target = {'user_id': alarm.user_id, - 'project_id': alarm.project_id} - rbac.enforce(rbac_directive, pecan.request.headers, - pecan.request.enforcer, target) - return alarm - - def _record_change(self, data, now, on_behalf_of=None, type=None): - if not pecan.request.cfg.record_history: - return - if not data: - return - type = type or models.AlarmChange.RULE_CHANGE - scrubbed_data = stringify_timestamps(data) - detail = json.dumps(scrubbed_data) - user_id = pecan.request.headers.get('X-User-Id') - project_id = pecan.request.headers.get('X-Project-Id') - on_behalf_of = on_behalf_of or project_id - severity = scrubbed_data.get('severity') - payload = dict(event_id=uuidutils.generate_uuid(), - alarm_id=self._id, - type=type, - detail=detail, - user_id=user_id, - project_id=project_id, - on_behalf_of=on_behalf_of, - timestamp=now, - severity=severity) - - try: - pecan.request.storage.record_alarm_change(payload) - except aodh.NotImplementedError: - pass - - # Revert to the pre-json'ed details ... - payload['detail'] = scrubbed_data - _send_notification(type, payload) - - def _record_delete(self, alarm): - if not alarm: - return - type = models.AlarmChange.DELETION - detail = {'state': alarm.state} - user_id = pecan.request.headers.get('X-User-Id') - project_id = pecan.request.headers.get('X-Project-Id') - payload = dict(event_id=uuidutils.generate_uuid(), - alarm_id=self._id, - type=type, - detail=detail, - user_id=user_id, - project_id=project_id, - on_behalf_of=project_id, - timestamp=timeutils.utcnow(), - severity=alarm.severity) - - pecan.request.storage.delete_alarm(alarm.alarm_id) - _send_notification(type, payload) - - @wsme_pecan.wsexpose(Alarm) - def get(self): - """Return this alarm.""" - return Alarm.from_db_model(self._enforce_rbac('get_alarm')) - - @wsme_pecan.wsexpose(Alarm, body=Alarm) - def put(self, data): - """Modify this alarm. - - :param data: an alarm within the request body. 
- """ - - # Ensure alarm exists - alarm_in = self._enforce_rbac('change_alarm') - - now = timeutils.utcnow() - - data.alarm_id = self._id - - user, project = rbac.get_limited_to(pecan.request.headers, - pecan.request.enforcer) - if user: - data.user_id = user - elif data.user_id == wtypes.Unset: - data.user_id = alarm_in.user_id - if project: - data.project_id = project - elif data.project_id == wtypes.Unset: - data.project_id = alarm_in.project_id - data.timestamp = now - if alarm_in.state != data.state: - data.state_timestamp = now - data.state_reason = ALARM_REASON_MANUAL - else: - data.state_timestamp = alarm_in.state_timestamp - data.state_reason = alarm_in.state_reason - - ALARMS_RULES[data.type].plugin.update_hook(data) - - old_data = Alarm.from_db_model(alarm_in) - old_alarm = old_data.as_dict(models.Alarm) - data.update_actions(old_data) - updated_alarm = data.as_dict(models.Alarm) - try: - alarm_in = models.Alarm(**updated_alarm) - except Exception: - LOG.exception("Error while putting alarm: %s", updated_alarm) - raise base.ClientSideError(_("Alarm incorrect")) - - alarm = pecan.request.storage.update_alarm(alarm_in) - - change = dict((k, v) for k, v in updated_alarm.items() - if v != old_alarm[k] and k not in - ['timestamp', 'state_timestamp']) - self._record_change(change, now, on_behalf_of=alarm.project_id) - return Alarm.from_db_model(alarm) - - @wsme_pecan.wsexpose(None, status_code=204) - def delete(self): - """Delete this alarm.""" - - # ensure alarm exists before deleting - alarm = self._enforce_rbac('delete_alarm') - self._record_delete(alarm) - alarm_object = Alarm.from_db_model(alarm) - alarm_object.delete_actions() - - @wsme_pecan.wsexpose([AlarmChange], [base.Query], [str], int, str) - def history(self, q=None, sort=None, limit=None, marker=None): - """Assembles the alarm history requested. - - :param q: Filter rules for the changes to be described. - :param sort: A list of pairs of sort key and sort dir. - :param limit: The maximum number of items to be return. - :param marker: The pagination query marker. - """ - - # Ensure alarm exists - self._enforce_rbac('alarm_history') - - q = q or [] - # allow history to be returned for deleted alarms, but scope changes - # returned to those carried out on behalf of the auth'd tenant, to - # avoid inappropriate cross-tenant visibility of alarm history - auth_project = rbac.get_limited_to_project(pecan.request.headers, - pecan.request.enforcer) - conn = pecan.request.storage - kwargs = v2_utils.query_to_kwargs( - q, conn.get_alarm_changes, ['on_behalf_of', 'alarm_id']) - if sort or limit or marker: - kwargs['pagination'] = v2_utils.get_pagination_options( - sort, limit, marker, models.AlarmChange) - return [AlarmChange.from_db_model(ac) - for ac in conn.get_alarm_changes(self._id, auth_project, - **kwargs)] - - @wsme.validate(state_kind_enum) - @wsme_pecan.wsexpose(state_kind_enum, body=state_kind_enum) - def put_state(self, state): - """Set the state of this alarm. - - :param state: an alarm state within the request body. 
- """ - - alarm = self._enforce_rbac('change_alarm_state') - - # note(sileht): body are not validated by wsme - # Workaround for https://bugs.launchpad.net/wsme/+bug/1227229 - if state not in state_kind: - raise base.ClientSideError(_("state invalid")) - now = timeutils.utcnow() - alarm.state = state - alarm.state_timestamp = now - alarm.state_reason = ALARM_REASON_MANUAL - alarm = pecan.request.storage.update_alarm(alarm) - change = {'state': alarm.state, - 'state_reason': alarm.state_reason} - self._record_change(change, now, on_behalf_of=alarm.project_id, - type=models.AlarmChange.STATE_TRANSITION) - return alarm.state - - @wsme_pecan.wsexpose(state_kind_enum) - def get_state(self): - """Get the state of this alarm.""" - return self._enforce_rbac('get_alarm_state').state - - -class AlarmsController(rest.RestController): - """Manages operations on the alarms collection.""" - - @pecan.expose() - def _lookup(self, alarm_id, *remainder): - return AlarmController(alarm_id), remainder - - @staticmethod - def _record_creation(conn, data, alarm_id, now): - if not pecan.request.cfg.record_history: - return - type = models.AlarmChange.CREATION - scrubbed_data = stringify_timestamps(data) - detail = json.dumps(scrubbed_data) - user_id = pecan.request.headers.get('X-User-Id') - project_id = pecan.request.headers.get('X-Project-Id') - severity = scrubbed_data.get('severity') - payload = dict(event_id=uuidutils.generate_uuid(), - alarm_id=alarm_id, - type=type, - detail=detail, - user_id=user_id, - project_id=project_id, - on_behalf_of=project_id, - timestamp=now, - severity=severity) - - try: - conn.record_alarm_change(payload) - except aodh.NotImplementedError: - pass - - # Revert to the pre-json'ed details ... - payload['detail'] = scrubbed_data - _send_notification(type, payload) - - @wsme_pecan.wsexpose(Alarm, body=Alarm, status_code=201) - def post(self, data): - """Create a new alarm. - - :param data: an alarm within the request body. 
- """ - rbac.enforce('create_alarm', pecan.request.headers, - pecan.request.enforcer, {}) - - conn = pecan.request.storage - now = timeutils.utcnow() - - data.alarm_id = uuidutils.generate_uuid() - user_limit, project_limit = rbac.get_limited_to(pecan.request.headers, - pecan.request.enforcer) - - def _set_ownership(aspect, owner_limitation, header): - attr = '%s_id' % aspect - requested_owner = getattr(data, attr) - explicit_owner = requested_owner != wtypes.Unset - caller = pecan.request.headers.get(header) - if (owner_limitation and explicit_owner - and requested_owner != caller): - raise base.ProjectNotAuthorized(requested_owner, aspect) - - actual_owner = (owner_limitation or - requested_owner if explicit_owner else caller) - setattr(data, attr, actual_owner) - - _set_ownership('user', user_limit, 'X-User-Id') - _set_ownership('project', project_limit, 'X-Project-Id') - - # Check if there's room for one more alarm - if is_over_quota(conn, data.project_id, data.user_id): - raise OverQuota(data) - - data.timestamp = now - data.state_timestamp = now - data.state_reason = ALARM_REASON_DEFAULT - - ALARMS_RULES[data.type].plugin.create_hook(data) - - change = data.as_dict(models.Alarm) - - data.update_actions() - - try: - alarm_in = models.Alarm(**change) - except Exception: - LOG.exception("Error while posting alarm: %s", change) - raise base.ClientSideError(_("Alarm incorrect")) - - alarm = conn.create_alarm(alarm_in) - self._record_creation(conn, change, alarm.alarm_id, now) - v2_utils.set_resp_location_hdr("/alarms/" + alarm.alarm_id) - return Alarm.from_db_model(alarm) - - @wsme_pecan.wsexpose([Alarm], [base.Query], [str], int, str) - def get_all(self, q=None, sort=None, limit=None, marker=None): - """Return all alarms, based on the query provided. - - :param q: Filter rules for the alarms to be returned. - :param sort: A list of pairs of sort key and sort dir. - :param limit: The maximum number of items to be return. - :param marker: The pagination query marker. - """ - target = rbac.target_from_segregation_rule( - pecan.request.headers, pecan.request.enforcer) - rbac.enforce('get_alarms', pecan.request.headers, - pecan.request.enforcer, target) - - q = q or [] - # Timestamp is not supported field for Simple Alarm queries - kwargs = v2_utils.query_to_kwargs( - q, pecan.request.storage.get_alarms, - allow_timestamps=False) - if sort or limit or marker: - kwargs['pagination'] = v2_utils.get_pagination_options( - sort, limit, marker, models.Alarm) - return [Alarm.from_db_model(m) - for m in pecan.request.storage.get_alarms(**kwargs)] diff --git a/aodh/api/controllers/v2/base.py b/aodh/api/controllers/v2/base.py deleted file mode 100644 index 6c95c050..00000000 --- a/aodh/api/controllers/v2/base.py +++ /dev/null @@ -1,233 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# Copyright 2013 IBM Corp. -# Copyright 2013 eNovance -# Copyright Ericsson AB 2013. All rights reserved -# Copyright 2014 Hewlett-Packard Company -# Copyright 2015 Huawei Technologies Co., Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import ast -import datetime -import functools -import inspect - -from oslo_utils import strutils -from oslo_utils import timeutils -import pecan -import six -import wsme -from wsme import types as wtypes - -from aodh.i18n import _ - - -operation_kind = ('lt', 'le', 'eq', 'ne', 'ge', 'gt') -operation_kind_enum = wtypes.Enum(str, *operation_kind) - - -class ClientSideError(wsme.exc.ClientSideError): - def __init__(self, error, status_code=400): - pecan.response.translatable_error = error - super(ClientSideError, self).__init__(error, status_code) - - -class ProjectNotAuthorized(ClientSideError): - def __init__(self, id, aspect='project'): - params = dict(aspect=aspect, id=id) - super(ProjectNotAuthorized, self).__init__( - _("Not Authorized to access %(aspect)s %(id)s") % params, - status_code=401) - - -class AdvEnum(wtypes.wsproperty): - """Handle default and mandatory for wtypes.Enum.""" - def __init__(self, name, *args, **kwargs): - self._name = '_advenum_%s' % name - self._default = kwargs.pop('default', None) - mandatory = kwargs.pop('mandatory', False) - enum = wtypes.Enum(*args, **kwargs) - super(AdvEnum, self).__init__(datatype=enum, fget=self._get, - fset=self._set, mandatory=mandatory) - - def _get(self, parent): - if hasattr(parent, self._name): - value = getattr(parent, self._name) - return value or self._default - return self._default - - def _set(self, parent, value): - try: - if self.datatype.validate(value): - setattr(parent, self._name, value) - except ValueError as e: - raise wsme.exc.InvalidInput(self._name.replace('_advenum_', '', 1), - value, e) - - -class Base(wtypes.DynamicBase): - - @classmethod - def from_db_model(cls, m): - return cls(**(m.as_dict())) - - @classmethod - def from_db_and_links(cls, m, links): - return cls(links=links, **(m.as_dict())) - - def as_dict(self, db_model): - valid_keys = inspect.getargspec(db_model.__init__)[0] - if 'self' in valid_keys: - valid_keys.remove('self') - return self.as_dict_from_keys(valid_keys) - - def as_dict_from_keys(self, keys): - return dict((k, getattr(self, k)) - for k in keys - if hasattr(self, k) and - getattr(self, k) != wsme.Unset) - - -class Query(Base): - """Query filter.""" - - # The data types supported by the query. - _supported_types = ['integer', 'float', 'string', 'boolean', 'datetime'] - - # Functions to convert the data field to the correct type. - _type_converters = {'integer': int, - 'float': float, - 'boolean': functools.partial( - strutils.bool_from_string, strict=True), - 'string': six.text_type, - 'datetime': timeutils.parse_isotime} - - _op = None # provide a default - - def get_op(self): - return self._op or 'eq' - - def set_op(self, value): - self._op = value - - field = wsme.wsattr(wtypes.text, mandatory=True) - "The name of the field to test" - - # op = wsme.wsattr(operation_kind, default='eq') - # this ^ doesn't seem to work. - op = wsme.wsproperty(operation_kind_enum, get_op, set_op) - "The comparison operator. Defaults to 'eq'." 
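
[editor note] As the commented-out wsattr line above notes, a plain default on the Query 'op' attribute did not work, so the code falls back to a wsproperty whose getter substitutes 'eq' when nothing was set. The same default-on-read property pattern in plain Python, as a hypothetical standalone sketch with no wsme dependency:

    class QuerySketch:
        _op = None  # backing field; None means the client never set it

        @property
        def op(self):
            return self._op or 'eq'  # documented default

        @op.setter
        def op(self, value):
            self._op = value

    q = QuerySketch()
    assert q.op == 'eq'   # unset -> falls back to the default
    q.op = 'gt'
    assert q.op == 'gt'
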
- - value = wsme.wsattr(wtypes.text, mandatory=True) - "The value to compare against the stored data" - - type = wtypes.text - "The data type of value to compare against the stored data" - - def __repr__(self): - # for logging calls - return '' % (self.field, - self.op, - self.value, - self.type) - - @classmethod - def sample(cls): - return cls(field='resource_id', - op='eq', - value='bd9431c1-8d69-4ad3-803a-8d4a6b89fd36', - type='string' - ) - - def as_dict(self): - return self.as_dict_from_keys(['field', 'op', 'type', 'value']) - - def _get_value_as_type(self, forced_type=None): - """Convert metadata value to the specified data type. - - This method is called during metadata query to help convert the - querying metadata to the data type specified by user. If there is no - data type given, the metadata will be parsed by ast.literal_eval to - try to do a smart converting. - - NOTE (flwang) Using "_" as prefix to avoid an InvocationError raised - from wsmeext/sphinxext.py. It's OK to call it outside the Query class. - Because the "public" side of that class is actually the outside of the - API, and the "private" side is the API implementation. The method is - only used in the API implementation, so it's OK. - - :returns: metadata value converted with the specified data type. - """ - type = forced_type or self.type - try: - converted_value = self.value - if not type: - try: - converted_value = ast.literal_eval(self.value) - except (ValueError, SyntaxError): - # Unable to convert the metadata value automatically - # let it default to self.value - pass - else: - if type not in self._supported_types: - # Types must be explicitly declared so the - # correct type converter may be used. Subclasses - # of Query may define _supported_types and - # _type_converters to define their own types. - raise TypeError() - converted_value = self._type_converters[type](self.value) - if isinstance(converted_value, datetime.datetime): - converted_value = timeutils.normalize_time(converted_value) - except ValueError: - msg = (_('Unable to convert the value %(value)s' - ' to the expected data type %(type)s.') % - {'value': self.value, 'type': type}) - raise ClientSideError(msg) - except TypeError: - msg = (_('The data type %(type)s is not supported. The supported' - ' data type list is: %(supported)s') % - {'type': type, 'supported': self._supported_types}) - raise ClientSideError(msg) - except Exception: - msg = (_('Unexpected exception converting %(value)s to' - ' the expected data type %(type)s.') % - {'value': self.value, 'type': type}) - raise ClientSideError(msg) - return converted_value - - -class AlarmNotFound(ClientSideError): - def __init__(self, alarm, auth_project): - if not auth_project: - msg = _('Alarm %s not found') % alarm - else: - msg = _('Alarm %(alarm_id)s not found in project %' - '(project)s') % { - 'alarm_id': alarm, 'project': auth_project} - super(AlarmNotFound, self).__init__(msg, status_code=404) - - -class AlarmRule(Base): - """Base class Alarm Rule extension and wsme.types.""" - @staticmethod - def validate_alarm(alarm): - pass - - @staticmethod - def create_hook(alarm): - pass - - @staticmethod - def update_hook(alarm): - pass diff --git a/aodh/api/controllers/v2/capabilities.py b/aodh/api/controllers/v2/capabilities.py deleted file mode 100644 index d6a19223..00000000 --- a/aodh/api/controllers/v2/capabilities.py +++ /dev/null @@ -1,111 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# Copyright 2013 IBM Corp. -# Copyright 2013 eNovance -# Copyright Ericsson AB 2013. 
All rights reserved -# Copyright 2014 Hewlett-Packard Company -# Copyright 2015 Huawei Technologies Co., Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import pecan -from pecan import rest -import six -from wsme import types as wtypes -import wsmeext.pecan as wsme_pecan - -from aodh.api.controllers.v2 import base - - -def _decode_unicode(input): - """Decode the unicode of the message, and encode it into utf-8.""" - if isinstance(input, dict): - temp = {} - # If the input data is a dict, create an equivalent dict with a - # predictable insertion order to avoid inconsistencies in the - # message signature computation for equivalent payloads modulo - # ordering - for key, value in sorted(six.iteritems(input)): - temp[_decode_unicode(key)] = _decode_unicode(value) - return temp - elif isinstance(input, (tuple, list)): - # When doing a pair of JSON encode/decode operations to the tuple, - # the tuple would become list. So we have to generate the value as - # list here. - return [_decode_unicode(element) for element in input] - elif isinstance(input, six.text_type): - return input.encode('utf-8') - else: - return input - - -def _recursive_keypairs(d, separator=':'): - """Generator that produces sequence of keypairs for nested dictionaries.""" - for name, value in sorted(six.iteritems(d)): - if isinstance(value, dict): - for subname, subvalue in _recursive_keypairs(value, separator): - yield ('%s%s%s' % (name, separator, subname), subvalue) - elif isinstance(value, (tuple, list)): - yield name, _decode_unicode(value) - else: - yield name, value - - -def _flatten_capabilities(capabilities): - return dict((k, v) for k, v in _recursive_keypairs(capabilities)) - - -class Capabilities(base.Base): - """A representation of the API and storage capabilities. - - Usually constrained by restrictions imposed by the storage driver. - """ - - api = {wtypes.text: bool} - "A flattened dictionary of API capabilities" - alarm_storage = {wtypes.text: bool} - "A flattened dictionary of alarm storage capabilities" - - @classmethod - def sample(cls): - return cls( - api=_flatten_capabilities({ - 'alarms': {'query': {'simple': True, - 'complex': True}, - 'history': {'query': {'simple': True, - 'complex': True}}}, - }), - alarm_storage=_flatten_capabilities( - {'storage': {'production_ready': True}}), - ) - - -class CapabilitiesController(rest.RestController): - """Manages capabilities queries.""" - - @wsme_pecan.wsexpose(Capabilities) - def get(self): - """Returns a flattened dictionary of API capabilities. - - Capabilities supported by the currently configured storage driver. 
- """ - # variation in API capabilities is effectively determined by - # the lack of strict feature parity across storage drivers - alarm_conn = pecan.request.storage - driver_capabilities = { - 'alarms': alarm_conn.get_capabilities()['alarms'], - } - alarm_driver_perf = alarm_conn.get_storage_capabilities() - return Capabilities(api=_flatten_capabilities(driver_capabilities), - alarm_storage=_flatten_capabilities( - alarm_driver_perf)) diff --git a/aodh/api/controllers/v2/query.py b/aodh/api/controllers/v2/query.py deleted file mode 100644 index 29f9ca8b..00000000 --- a/aodh/api/controllers/v2/query.py +++ /dev/null @@ -1,395 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# Copyright 2013 IBM Corp. -# Copyright 2013 eNovance -# Copyright Ericsson AB 2013. All rights reserved -# Copyright 2014 Hewlett-Packard Company -# Copyright 2015 Huawei Technologies Co., Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import json - -import jsonschema -from oslo_log import log -from oslo_utils import timeutils -import pecan -from pecan import rest -from wsme import types as wtypes -import wsmeext.pecan as wsme_pecan - -from aodh.api.controllers.v2 import alarms -from aodh.api.controllers.v2 import base -from aodh.api import rbac -from aodh.i18n import _ -from aodh.storage import models - -LOG = log.getLogger(__name__) - - -class ComplexQuery(base.Base): - """Holds a sample query encoded in json.""" - - filter = wtypes.text - "The filter expression encoded in json." - - orderby = wtypes.text - "List of single-element dicts for specifing the ordering of the results." - - limit = int - "The maximum number of results to be returned." 
- - @classmethod - def sample(cls): - return cls(filter='{"and": [{"and": [{"=": ' + - '{"counter_name": "cpu_util"}}, ' + - '{">": {"counter_volume": 0.23}}, ' + - '{"<": {"counter_volume": 0.26}}]}, ' + - '{"or": [{"and": [{">": ' + - '{"timestamp": "2013-12-01T18:00:00"}}, ' + - '{"<": ' + - '{"timestamp": "2013-12-01T18:15:00"}}]}, ' + - '{"and": [{">": ' + - '{"timestamp": "2013-12-01T18:30:00"}}, ' + - '{"<": ' + - '{"timestamp": "2013-12-01T18:45:00"}}]}]}]}', - orderby='[{"counter_volume": "ASC"}, ' + - '{"timestamp": "DESC"}]', - limit=42 - ) - - -def _list_to_regexp(items, regexp_prefix=""): - regexp = ["^%s$" % item for item in items] - regexp = regexp_prefix + "|".join(regexp) - return regexp - - -class ValidatedComplexQuery(object): - complex_operators = ["and", "or"] - order_directions = ["asc", "desc"] - simple_ops = ["=", "!=", "<", ">", "<=", "=<", ">=", "=>", "=~"] - regexp_prefix = "(?i)" - - complex_ops = _list_to_regexp(complex_operators, regexp_prefix) - simple_ops = _list_to_regexp(simple_ops, regexp_prefix) - order_directions = _list_to_regexp(order_directions, regexp_prefix) - - timestamp_fields = ["timestamp", "state_timestamp"] - - def __init__(self, query, db_model, additional_name_mapping=None, - metadata_allowed=False): - additional_name_mapping = additional_name_mapping or {} - self.name_mapping = {"user": "user_id", - "project": "project_id"} - self.name_mapping.update(additional_name_mapping) - valid_keys = db_model.get_field_names() - valid_keys = list(valid_keys) + list(self.name_mapping.keys()) - valid_fields = _list_to_regexp(valid_keys) - - if metadata_allowed: - valid_filter_fields = valid_fields + "|^metadata\.[\S]+$" - else: - valid_filter_fields = valid_fields - - schema_value = { - "oneOf": [{"type": "string"}, - {"type": "number"}, - {"type": "boolean"}], - "minProperties": 1, - "maxProperties": 1} - - schema_value_in = { - "type": "array", - "items": {"oneOf": [{"type": "string"}, - {"type": "number"}]}, - "minItems": 1} - - schema_field = { - "type": "object", - "patternProperties": {valid_filter_fields: schema_value}, - "additionalProperties": False, - "minProperties": 1, - "maxProperties": 1} - - schema_field_in = { - "type": "object", - "patternProperties": {valid_filter_fields: schema_value_in}, - "additionalProperties": False, - "minProperties": 1, - "maxProperties": 1} - - schema_leaf_in = { - "type": "object", - "patternProperties": {"(?i)^in$": schema_field_in}, - "additionalProperties": False, - "minProperties": 1, - "maxProperties": 1} - - schema_leaf_simple_ops = { - "type": "object", - "patternProperties": {self.simple_ops: schema_field}, - "additionalProperties": False, - "minProperties": 1, - "maxProperties": 1} - - schema_and_or_array = { - "type": "array", - "items": {"$ref": "#"}, - "minItems": 2} - - schema_and_or = { - "type": "object", - "patternProperties": {self.complex_ops: schema_and_or_array}, - "additionalProperties": False, - "minProperties": 1, - "maxProperties": 1} - - schema_not = { - "type": "object", - "patternProperties": {"(?i)^not$": {"$ref": "#"}}, - "additionalProperties": False, - "minProperties": 1, - "maxProperties": 1} - - self.schema = { - "oneOf": [{"$ref": "#/definitions/leaf_simple_ops"}, - {"$ref": "#/definitions/leaf_in"}, - {"$ref": "#/definitions/and_or"}, - {"$ref": "#/definitions/not"}], - "minProperties": 1, - "maxProperties": 1, - "definitions": {"leaf_simple_ops": schema_leaf_simple_ops, - "leaf_in": schema_leaf_in, - "and_or": schema_and_or, - "not": schema_not}} - - self.orderby_schema = { 
- "type": "array", - "items": { - "type": "object", - "patternProperties": - {valid_fields: - {"type": "string", - "pattern": self.order_directions}}, - "additionalProperties": False, - "minProperties": 1, - "maxProperties": 1}} - - self.original_query = query - - def validate(self, visibility_field): - """Validates the query content and does the necessary conversions.""" - if self.original_query.filter is wtypes.Unset: - self.filter_expr = None - else: - try: - self.filter_expr = json.loads(self.original_query.filter) - self._validate_filter(self.filter_expr) - except (ValueError, jsonschema.exceptions.ValidationError) as e: - raise base.ClientSideError( - _("Filter expression not valid: %s") % str(e)) - self._replace_isotime_with_datetime(self.filter_expr) - self._convert_operator_to_lower_case(self.filter_expr) - self._normalize_field_names_for_db_model(self.filter_expr) - - self._force_visibility(visibility_field) - - if self.original_query.orderby is wtypes.Unset: - self.orderby = None - else: - try: - self.orderby = json.loads(self.original_query.orderby) - self._validate_orderby(self.orderby) - except (ValueError, jsonschema.exceptions.ValidationError) as e: - raise base.ClientSideError( - _("Order-by expression not valid: %s") % e) - self._convert_orderby_to_lower_case(self.orderby) - self._normalize_field_names_in_orderby(self.orderby) - - if self.original_query.limit is wtypes.Unset: - self.limit = None - else: - self.limit = self.original_query.limit - - if self.limit is not None and self.limit <= 0: - msg = _('Limit should be positive') - raise base.ClientSideError(msg) - - @staticmethod - def lowercase_values(mapping): - """Converts the values in the mapping dict to lowercase.""" - items = mapping.items() - for key, value in items: - mapping[key] = value.lower() - - def _convert_orderby_to_lower_case(self, orderby): - for orderby_field in orderby: - self.lowercase_values(orderby_field) - - def _normalize_field_names_in_orderby(self, orderby): - for orderby_field in orderby: - self._replace_field_names(orderby_field) - - def _traverse_postorder(self, tree, visitor): - op = list(tree.keys())[0] - if op.lower() in self.complex_operators: - for i, operand in enumerate(tree[op]): - self._traverse_postorder(operand, visitor) - if op.lower() == "not": - self._traverse_postorder(tree[op], visitor) - - visitor(tree) - - def _check_cross_project_references(self, own_project_id, - visibility_field): - """Do not allow other than own_project_id.""" - def check_project_id(subfilter): - op, value = list(subfilter.items())[0] - if (op.lower() not in self.complex_operators - and list(value.keys())[0] == visibility_field - and value[visibility_field] != own_project_id): - raise base.ProjectNotAuthorized(value[visibility_field]) - - self._traverse_postorder(self.filter_expr, check_project_id) - - def _force_visibility(self, visibility_field): - """Force visibility field. - - If the tenant is not admin insert an extra - "and =" clause to the query. 
- """ - authorized_project = rbac.get_limited_to_project( - pecan.request.headers, pecan.request.enforcer) - is_admin = authorized_project is None - if not is_admin: - self._restrict_to_project(authorized_project, visibility_field) - self._check_cross_project_references(authorized_project, - visibility_field) - - def _restrict_to_project(self, project_id, visibility_field): - restriction = {"=": {visibility_field: project_id}} - if self.filter_expr is None: - self.filter_expr = restriction - else: - self.filter_expr = {"and": [restriction, self.filter_expr]} - - def _replace_isotime_with_datetime(self, filter_expr): - def replace_isotime(subfilter): - op, value = list(subfilter.items())[0] - if op.lower() not in self.complex_operators: - field = list(value.keys())[0] - if field in self.timestamp_fields: - date_time = self._convert_to_datetime(subfilter[op][field]) - subfilter[op][field] = date_time - - self._traverse_postorder(filter_expr, replace_isotime) - - def _normalize_field_names_for_db_model(self, filter_expr): - def _normalize_field_names(subfilter): - op, value = list(subfilter.items())[0] - if op.lower() not in self.complex_operators: - self._replace_field_names(value) - self._traverse_postorder(filter_expr, - _normalize_field_names) - - def _replace_field_names(self, subfilter): - field, value = list(subfilter.items())[0] - if field in self.name_mapping: - del subfilter[field] - subfilter[self.name_mapping[field]] = value - if field.startswith("metadata."): - del subfilter[field] - subfilter["resource_" + field] = value - - @staticmethod - def lowercase_keys(mapping): - """Converts the values of the keys in mapping to lowercase.""" - items = mapping.items() - for key, value in items: - del mapping[key] - mapping[key.lower()] = value - - def _convert_operator_to_lower_case(self, filter_expr): - self._traverse_postorder(filter_expr, self.lowercase_keys) - - @staticmethod - def _convert_to_datetime(isotime): - try: - date_time = timeutils.parse_isotime(isotime) - date_time = date_time.replace(tzinfo=None) - return date_time - except ValueError: - LOG.exception("String %s is not a valid isotime", isotime) - msg = _('Failed to parse the timestamp value %s') % isotime - raise base.ClientSideError(msg) - - def _validate_filter(self, filter_expr): - jsonschema.validate(filter_expr, self.schema) - - def _validate_orderby(self, orderby_expr): - jsonschema.validate(orderby_expr, self.orderby_schema) - - -class QueryAlarmHistoryController(rest.RestController): - """Provides complex query possibilities for alarm history.""" - @wsme_pecan.wsexpose([alarms.AlarmChange], body=ComplexQuery) - def post(self, body): - """Define query for retrieving AlarmChange data. - - :param body: Query rules for the alarm history to be returned. - """ - target = rbac.target_from_segregation_rule( - pecan.request.headers, pecan.request.enforcer) - rbac.enforce('query_alarm_history', pecan.request.headers, - pecan.request.enforcer, target) - - query = ValidatedComplexQuery(body, - models.AlarmChange) - query.validate(visibility_field="on_behalf_of") - conn = pecan.request.storage - return [alarms.AlarmChange.from_db_model(s) - for s in conn.query_alarm_history(query.filter_expr, - query.orderby, - query.limit)] - - -class QueryAlarmsController(rest.RestController): - """Provides complex query possibilities for alarms.""" - history = QueryAlarmHistoryController() - - @wsme_pecan.wsexpose([alarms.Alarm], body=ComplexQuery) - def post(self, body): - """Define query for retrieving Alarm data. 
- - :param body: Query rules for the alarms to be returned. - """ - - target = rbac.target_from_segregation_rule( - pecan.request.headers, pecan.request.enforcer) - rbac.enforce('query_alarm', pecan.request.headers, - pecan.request.enforcer, target) - - query = ValidatedComplexQuery(body, - models.Alarm) - query.validate(visibility_field="project_id") - conn = pecan.request.storage - return [alarms.Alarm.from_db_model(s) - for s in conn.query_alarms(query.filter_expr, - query.orderby, - query.limit)] - - -class QueryController(rest.RestController): - - alarms = QueryAlarmsController() diff --git a/aodh/api/controllers/v2/root.py b/aodh/api/controllers/v2/root.py deleted file mode 100644 index a6f94cb2..00000000 --- a/aodh/api/controllers/v2/root.py +++ /dev/null @@ -1,31 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# Copyright 2013 IBM Corp. -# Copyright 2013 eNovance -# Copyright Ericsson AB 2013. All rights reserved -# Copyright 2014 Hewlett-Packard Company -# Copyright 2015 Huawei Technologies Co., Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from aodh.api.controllers.v2 import alarms -from aodh.api.controllers.v2 import capabilities -from aodh.api.controllers.v2 import query - - -class V2Controller(object): - """Version 2 API controller root.""" - - alarms = alarms.AlarmsController() - query = query.QueryController() - capabilities = capabilities.CapabilitiesController() diff --git a/aodh/api/controllers/v2/utils.py b/aodh/api/controllers/v2/utils.py deleted file mode 100644 index 5018b332..00000000 --- a/aodh/api/controllers/v2/utils.py +++ /dev/null @@ -1,322 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# Copyright 2013 IBM Corp. -# Copyright 2013 eNovance -# Copyright Ericsson AB 2013. All rights reserved -# Copyright 2014 Hewlett-Packard Company -# Copyright 2015 Huawei Technologies Co., Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
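
[editor note] ValidatedComplexQuery above validates the JSON filter against a recursively self-referencing jsonschema before touching storage. A cut-down sketch of that approach, assuming only and/or plus a few comparison operators (the real schema also covered 'in' and 'not' and restricted field names):

    import json

    import jsonschema

    SIMPLE = {"type": "object",
              "patternProperties": {"^(=|!=|<|<=|>|>=)$": {"type": "object"}},
              "additionalProperties": False,
              "minProperties": 1, "maxProperties": 1}
    COMPLEX = {"type": "object",
               "patternProperties": {"(?i)^(and|or)$": {
                   "type": "array", "items": {"$ref": "#"}, "minItems": 2}},
               "additionalProperties": False,
               "minProperties": 1, "maxProperties": 1}
    SCHEMA = {"oneOf": [SIMPLE, COMPLEX]}

    expr = json.loads('{"and": [{"=": {"state": "alarm"}},'
                      ' {"!=": {"project_id": "abc"}}]}')
    jsonschema.validate(expr, SCHEMA)  # raises ValidationError if malformed
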
- -import copy -import datetime -import inspect - -from oslo_utils import timeutils -import pecan -import six -from six.moves.urllib import parse as urllib_parse -import wsme - -from aodh.api.controllers.v2 import base -from aodh.api import rbac - - -def get_auth_project(on_behalf_of=None): - # when an alarm is created by an admin on behalf of another tenant - # we must ensure for: - # - threshold alarm, that an implicit query constraint on project_id is - # added so that admin-level visibility on statistics is not leaked - # Hence, for null auth_project (indicating admin-ness) we check if - # the creating tenant differs from the tenant on whose behalf the - # alarm is being created - auth_project = rbac.get_limited_to_project(pecan.request.headers, - pecan.request.enforcer) - created_by = pecan.request.headers.get('X-Project-Id') - is_admin = auth_project is None - - if is_admin and on_behalf_of != created_by: - auth_project = on_behalf_of - return auth_project - - -def sanitize_query(query, db_func, on_behalf_of=None): - """Check the query. - - See if: - 1) the request is coming from admin - then allow full visibility - 2) non-admin - make sure that the query includes the requester's project. - """ - q = copy.copy(query) - - auth_project = get_auth_project(on_behalf_of) - if auth_project: - _verify_query_segregation(q, auth_project) - - proj_q = [i for i in q if i.field == 'project_id'] - valid_keys = inspect.getargspec(db_func)[0] - if not proj_q and 'on_behalf_of' not in valid_keys: - # The user is restricted, but they didn't specify a project - # so add it for them. - q.append(base.Query(field='project_id', - op='eq', - value=auth_project)) - return q - - -def _verify_query_segregation(query, auth_project=None): - """Ensure non-admin queries are not constrained to another project.""" - auth_project = (auth_project or - rbac.get_limited_to_project(pecan.request.headers, - pecan.request.enforcer)) - - if not auth_project: - return - - for q in query: - if q.field in ('project', 'project_id') and auth_project != q.value: - raise base.ProjectNotAuthorized(q.value) - - -def validate_query(query, db_func, internal_keys=None, - allow_timestamps=True): - """Validates the syntax of the query and verifies the query. - - Verification check if the query request is authorized for the included - project. 
- :param query: Query expression that should be validated - :param db_func: the function on the storage level, of which arguments - will form the valid_keys list, which defines the valid fields for a - query expression - :param internal_keys: internally used field names, that should not be - used for querying - :param allow_timestamps: defines whether the timestamp-based constraint is - applicable for this query or not - - :raises InvalidInput: if an operator is not supported for a given field - :raises InvalidInput: if timestamp constraints are allowed, but - search_offset was included without timestamp constraint - :raises UnknownArgument: if a field name is not a timestamp field, nor - in the list of valid keys - """ - - internal_keys = internal_keys or [] - _verify_query_segregation(query) - - valid_keys = inspect.getargspec(db_func)[0] - if 'alarm_type' in valid_keys: - valid_keys.remove('alarm_type') - valid_keys.append('type') - if 'pagination' in valid_keys: - valid_keys.remove('pagination') - - internal_timestamp_keys = ['end_timestamp', 'start_timestamp', - 'end_timestamp_op', 'start_timestamp_op'] - if 'start_timestamp' in valid_keys: - internal_keys += internal_timestamp_keys - valid_keys += ['timestamp', 'search_offset'] - internal_keys.append('self') - internal_keys.append('metaquery') - valid_keys = set(valid_keys) - set(internal_keys) - translation = {'user_id': 'user', - 'project_id': 'project', - 'resource_id': 'resource'} - - has_timestamp_query = _validate_timestamp_fields(query, - 'timestamp', - ('lt', 'le', 'gt', 'ge'), - allow_timestamps) - has_search_offset_query = _validate_timestamp_fields(query, - 'search_offset', - 'eq', - allow_timestamps) - - if has_search_offset_query and not has_timestamp_query: - raise wsme.exc.InvalidInput('field', 'search_offset', - "search_offset cannot be used without " + - "timestamp") - - def _is_field_metadata(field): - return (field.startswith('metadata.') or - field.startswith('resource_metadata.')) - - for i in query: - if i.field not in ('timestamp', 'search_offset'): - key = translation.get(i.field, i.field) - operator = i.op - if key in valid_keys or _is_field_metadata(i.field): - if operator == 'eq': - if key == 'enabled': - i._get_value_as_type('boolean') - elif _is_field_metadata(key): - i._get_value_as_type() - else: - raise wsme.exc.InvalidInput('op', i.op, - 'unimplemented operator for ' - '%s' % i.field) - else: - msg = ("unrecognized field in query: %s, " - "valid keys: %s") % (query, sorted(valid_keys)) - raise wsme.exc.UnknownArgument(key, msg) - - -def _validate_timestamp_fields(query, field_name, operator_list, - allow_timestamps): - """Validates the timestamp related constraints in a query if there are any. - - :param query: query expression that may contain the timestamp fields - :param field_name: timestamp name, which should be checked (timestamp, - search_offset) - :param operator_list: list of operators that are supported for that - timestamp, which was specified in the parameter field_name - :param allow_timestamps: defines whether the timestamp-based constraint is - applicable to this query or not - - :returns: True, if there was a timestamp constraint, containing - a timestamp field named as defined in field_name, in the query and it - was allowed and syntactically correct. 
- :returns: False, if there wasn't timestamp constraint, containing a - timestamp field named as defined in field_name, in the query - - :raises InvalidInput: if an operator is unsupported for a given timestamp - field - :raises UnknownArgument: if the timestamp constraint is not allowed in - the query - """ - - for item in query: - if item.field == field_name: - # If *timestamp* or *search_offset* field was specified in the - # query, but timestamp is not supported on that resource, on - # which the query was invoked, then raise an exception. - if not allow_timestamps: - raise wsme.exc.UnknownArgument(field_name, - "not valid for " + - "this resource") - if item.op not in operator_list: - raise wsme.exc.InvalidInput('op', item.op, - 'unimplemented operator for %s' % - item.field) - return True - return False - - -def query_to_kwargs(query, db_func, internal_keys=None, - allow_timestamps=True): - validate_query(query, db_func, internal_keys=internal_keys, - allow_timestamps=allow_timestamps) - query = sanitize_query(query, db_func) - translation = {'user_id': 'user', - 'project_id': 'project', - 'resource_id': 'resource', - 'type': 'alarm_type'} - stamp = {} - kwargs = {} - for i in query: - if i.field == 'timestamp': - if i.op in ('lt', 'le'): - stamp['end_timestamp'] = i.value - stamp['end_timestamp_op'] = i.op - elif i.op in ('gt', 'ge'): - stamp['start_timestamp'] = i.value - stamp['start_timestamp_op'] = i.op - else: - if i.op == 'eq': - if i.field == 'search_offset': - stamp['search_offset'] = i.value - elif i.field == 'enabled': - kwargs[i.field] = i._get_value_as_type('boolean') - else: - key = translation.get(i.field, i.field) - kwargs[key] = i.value - - if stamp: - kwargs.update(_get_query_timestamps(stamp)) - return kwargs - - -def _get_query_timestamps(args=None): - """Return any optional timestamp information in the request. - - Determine the desired range, if any, from the GET arguments. Set - up the query range using the specified offset. - - [query_start ... start_timestamp ... end_timestamp ... 
query_end] - - Returns a dictionary containing: - - start_timestamp: First timestamp to use for query - start_timestamp_op: First timestamp operator to use for query - end_timestamp: Final timestamp to use for query - end_timestamp_op: Final timestamp operator to use for query - """ - - if args is None: - return {} - search_offset = int(args.get('search_offset', 0)) - - def _parse_timestamp(timestamp): - if not timestamp: - return None - try: - iso_timestamp = timeutils.parse_isotime(timestamp) - iso_timestamp = iso_timestamp.replace(tzinfo=None) - except ValueError: - raise wsme.exc.InvalidInput('timestamp', timestamp, - 'invalid timestamp format') - return iso_timestamp - - start_timestamp = _parse_timestamp(args.get('start_timestamp')) - end_timestamp = _parse_timestamp(args.get('end_timestamp')) - start_timestamp = start_timestamp - datetime.timedelta( - minutes=search_offset) if start_timestamp else None - end_timestamp = end_timestamp + datetime.timedelta( - minutes=search_offset) if end_timestamp else None - return {'start_timestamp': start_timestamp, - 'end_timestamp': end_timestamp, - 'start_timestamp_op': args.get('start_timestamp_op'), - 'end_timestamp_op': args.get('end_timestamp_op')} - - -def set_resp_location_hdr(location): - location = '%s%s' % (pecan.request.script_name, location) - # NOTE(sileht): according the pep-3333 the headers must be - # str in py2 and py3 even this is not the same thing in both - # version - # see: http://legacy.python.org/dev/peps/pep-3333/#unicode-issues - if six.PY2 and isinstance(location, six.text_type): - location = location.encode('utf-8') - location = urllib_parse.quote(location) - pecan.response.headers['Location'] = location - - -def get_pagination_options(sort, limit, marker, api_model): - sorts = list() - if limit and limit <= 0: - raise wsme.exc.InvalidInput('limit', limit, - 'it should be a positive integer.') - for s in sort or []: - sort_key, __, sort_dir = s.partition(':') - if sort_key not in api_model.SUPPORT_SORT_KEYS: - raise wsme.exc.InvalidInput( - 'sort', s, "the sort parameter should be a pair of sort " - "key and sort dir combined with ':', or only" - " sort key specified and sort dir will be default " - "'asc', the supported sort keys are: %s" % - str(api_model.SUPPORT_SORT_KEYS)) - # the default sort direction is 'asc' - sorts.append((sort_key, sort_dir or 'asc')) - - return {'limit': limit, - 'marker': marker, - 'sort': sorts} diff --git a/aodh/api/hooks.py b/aodh/api/hooks.py deleted file mode 100644 index 3d3e26d2..00000000 --- a/aodh/api/hooks.py +++ /dev/null @@ -1,53 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_policy import policy -from pecan import hooks - - -class ConfigHook(hooks.PecanHook): - """Attach the configuration and policy enforcer object to the request. - - That allows controllers to get it. 
- """ - - def __init__(self, conf): - self.conf = conf - self.enforcer = policy.Enforcer(conf, default_rule="default") - - def before(self, state): - state.request.cfg = self.conf - state.request.enforcer = self.enforcer - - -class DBHook(hooks.PecanHook): - - def __init__(self, alarm_conn): - self.storage = alarm_conn - - def before(self, state): - state.request.storage = self.storage - - -class TranslationHook(hooks.PecanHook): - - def after(self, state): - # After a request has been done, we need to see if - # ClientSideError has added an error onto the response. - # If it has we need to get it info the thread-safe WSGI - # environ to be used by the ParsableErrorMiddleware. - if hasattr(state.response, 'translatable_error'): - state.request.environ['translatable_error'] = ( - state.response.translatable_error) diff --git a/aodh/api/middleware.py b/aodh/api/middleware.py deleted file mode 100644 index 58b292e2..00000000 --- a/aodh/api/middleware.py +++ /dev/null @@ -1,127 +0,0 @@ -# -# Copyright 2013 IBM Corp. -# Copyright 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Middleware to replace the plain text message body of an error -response with one formatted so the client can parse it. - -Based on pecan.middleware.errordocument -""" - -import json - -from lxml import etree -from oslo_log import log -import six -import webob - -from aodh import i18n - -LOG = log.getLogger(__name__) - - -class ParsableErrorMiddleware(object): - """Replace error body with something the client can parse.""" - - @staticmethod - def best_match_language(accept_language): - """Determines best available locale from the Accept-Language header. - - :returns: the best language match or None if the 'Accept-Language' - header was not available in the request. - """ - if not accept_language: - return None - all_languages = i18n.get_available_languages() - return accept_language.best_match(all_languages) - - def __init__(self, app): - self.app = app - - def __call__(self, environ, start_response): - # Request for this state, modified by replace_start_response() - # and used when an error is being reported. - state = {} - - def replacement_start_response(status, headers, exc_info=None): - """Overrides the default response to make errors parsable.""" - try: - status_code = int(status.split(' ')[0]) - state['status_code'] = status_code - except (ValueError, TypeError): # pragma: nocover - raise Exception(( - 'ErrorDocumentMiddleware received an invalid ' - 'status %s' % status - )) - else: - if (state['status_code'] // 100) not in (2, 3): - # Remove some headers so we can replace them later - # when we have the full error message and can - # compute the length. - headers = [(h, v) - for (h, v) in headers - if h not in ('Content-Length', 'Content-Type') - ] - # Save the headers in case we need to modify them. 
- state['headers'] = headers - return start_response(status, headers, exc_info) - - app_iter = self.app(environ, replacement_start_response) - if (state['status_code'] // 100) not in (2, 3): - req = webob.Request(environ) - error = environ.get('translatable_error') - user_locale = self.best_match_language(req.accept_language) - if (req.accept.best_match(['application/json', 'application/xml']) - == 'application/xml'): - content_type = 'application/xml' - try: - # simple check xml is valid - fault = etree.fromstring(b'\n'.join(app_iter)) - # Add the translated error to the xml data - if error is not None: - for fault_string in fault.findall('faultstring'): - fault_string.text = i18n.translate(error, - user_locale) - error_message = etree.tostring(fault) - body = b''.join((b'', - error_message, - b'')) - except etree.XMLSyntaxError as err: - LOG.error('Error parsing HTTP response: %s', err) - error_message = state['status_code'] - body = '%s' % error_message - if six.PY3: - body = body.encode('utf-8') - else: - content_type = 'application/json' - app_data = b'\n'.join(app_iter) - if six.PY3: - app_data = app_data.decode('utf-8') - try: - fault = json.loads(app_data) - if error is not None and 'faultstring' in fault: - fault['faultstring'] = i18n.translate(error, - user_locale) - except ValueError as err: - fault = app_data - body = json.dumps({'error_message': fault}) - if six.PY3: - body = body.encode('utf-8') - - state['headers'].append(('Content-Length', str(len(body)))) - state['headers'].append(('Content-Type', content_type)) - body = [body] - else: - body = app_iter - return body diff --git a/aodh/api/policy.json b/aodh/api/policy.json deleted file mode 100644 index 4fd873e9..00000000 --- a/aodh/api/policy.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "context_is_admin": "role:admin", - "segregation": "rule:context_is_admin", - "admin_or_owner": "rule:context_is_admin or project_id:%(project_id)s", - "default": "rule:admin_or_owner", - - "telemetry:get_alarm": "rule:admin_or_owner", - "telemetry:get_alarms": "rule:admin_or_owner", - "telemetry:query_alarm": "rule:admin_or_owner", - - "telemetry:create_alarm": "", - "telemetry:change_alarm": "rule:admin_or_owner", - "telemetry:delete_alarm": "rule:admin_or_owner", - - "telemetry:get_alarm_state": "rule:admin_or_owner", - "telemetry:change_alarm_state": "rule:admin_or_owner", - - "telemetry:alarm_history": "rule:admin_or_owner", - "telemetry:query_alarm_history": "rule:admin_or_owner" -} diff --git a/aodh/api/rbac.py b/aodh/api/rbac.py deleted file mode 100644 index 7b592b07..00000000 --- a/aodh/api/rbac.py +++ /dev/null @@ -1,107 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# Copyright 2014 Hewlett-Packard Company -# Copyright 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
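
[editor note] ParsableErrorMiddleware above intercepts non-2xx/3xx responses and re-emits the body as an {'error_message': ...} JSON wrapper (or a wrapped XML fault), translating any faultstring first. The JSON branch in isolation, as a hedged sketch rather than the exact removed code:

    import json

    def wrap_json_error(app_data, translated=None):
        # app_data is the original, already-decoded response body.
        try:
            fault = json.loads(app_data)
            if translated is not None and 'faultstring' in fault:
                fault['faultstring'] = translated
        except ValueError:
            fault = app_data  # not JSON: pass the raw text through
        return json.dumps({'error_message': fault}).encode('utf-8')

    body = wrap_json_error('{"faultstring": "boom"}', translated='kaboom')
    assert json.loads(body) == {'error_message': {'faultstring': 'kaboom'}}
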
- -"""Access Control Lists (ACL's) control access the API server.""" - -import pecan - - -def target_from_segregation_rule(headers, enforcer): - """Return a target that corresponds of an alarm returned by segregation rule - - This allows to use project_id: in an oslo_policy rule for query/listing. - - :param headers: HTTP headers dictionary - :param enforcer: policy enforcer - - :returns: target - """ - - project_id = get_limited_to_project(headers, enforcer) - if project_id is not None: - return {'project_id': project_id} - return {} - - -def enforce(policy_name, headers, enforcer, target): - """Return the user and project the request should be limited to. - - :param policy_name: the policy name to validate authz against. - :param headers: HTTP headers dictionary - :param enforcer: policy enforcer - :param target: the alarm or "auto" to - - """ - rule_method = "telemetry:" + policy_name - - credentials = { - 'roles': headers.get('X-Roles', "").split(","), - 'user_id': headers.get('X-User-Id'), - 'project_id': headers.get('X-Project-Id'), - } - - # TODO(sileht): add deprecation warning to be able to remove this: - # maintain backward compat with Juno and previous by allowing the action if - # there is no rule defined for it - rules = enforcer.rules.keys() - if rule_method not in rules: - return - - if not enforcer.enforce(rule_method, target, credentials): - pecan.core.abort(status_code=403, - detail='RBAC Authorization Failed') - - -# TODO(fabiog): these methods are still used because the scoping part is really -# convoluted and difficult to separate out. - -def get_limited_to(headers, enforcer): - """Return the user and project the request should be limited to. - - :param headers: HTTP headers dictionary - :param enforcer: policy enforcer - :return: A tuple of (user, project), set to None if there's no limit on - one of these. - - """ - # TODO(sileht): Only filtering on role work currently for segregation - # oslo.policy expects the target to be the alarm. That will allow - # creating more enhanced rbac. But for now we enforce the - # scoping of request to the project-id, so... - target = {} - credentials = { - 'roles': headers.get('X-Roles', "").split(","), - } - # maintain backward compat with Juno and previous by using context_is_admin - # rule if the segregation rule (added in Kilo) is not defined - rules = enforcer.rules.keys() - rule_name = 'segregation' if 'segregation' in rules else 'context_is_admin' - if not enforcer.enforce(rule_name, target, credentials): - return headers.get('X-User-Id'), headers.get('X-Project-Id') - - return None, None - - -def get_limited_to_project(headers, enforcer): - """Return the project the request should be limited to. - - :param headers: HTTP headers dictionary - :param enforcer: policy enforcer - :return: A project, or None if there's no limit on it. - - """ - return get_limited_to(headers, enforcer)[1] diff --git a/aodh/cmd/__init__.py b/aodh/cmd/__init__.py deleted file mode 100644 index 0be90b5e..00000000 --- a/aodh/cmd/__init__.py +++ /dev/null @@ -1,29 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright 2017 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import os -import sys - - -def config_generator(): - try: - from oslo_config import generator - generator.main( - ['--config-file', - '%s/aodh-config-generator.conf' % os.path.dirname(__file__)] - + sys.argv[1:]) - except Exception as e: - print("Unable to build sample configuration file: %s" % e) - return 1 diff --git a/aodh/cmd/alarm.py b/aodh/cmd/alarm.py deleted file mode 100644 index 3914be13..00000000 --- a/aodh/cmd/alarm.py +++ /dev/null @@ -1,47 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright 2014 OpenStack Foundation -# Copyright 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import cotyledon - -from aodh import evaluator as evaluator_svc -from aodh import event as event_svc -from aodh import notifier as notifier_svc -from aodh import service - - -def notifier(): - conf = service.prepare_service() - sm = cotyledon.ServiceManager() - sm.add(notifier_svc.AlarmNotifierService, - workers=conf.notifier.workers, args=(conf,)) - sm.run() - - -def evaluator(): - conf = service.prepare_service() - sm = cotyledon.ServiceManager() - sm.add(evaluator_svc.AlarmEvaluationService, - workers=conf.evaluator.workers, args=(conf,)) - sm.run() - - -def listener(): - conf = service.prepare_service() - sm = cotyledon.ServiceManager() - sm.add(event_svc.EventAlarmEvaluationService, - workers=conf.listener.workers, args=(conf,)) - sm.run() diff --git a/aodh/cmd/aodh-config-generator.conf b/aodh/cmd/aodh-config-generator.conf deleted file mode 100644 index 87648119..00000000 --- a/aodh/cmd/aodh-config-generator.conf +++ /dev/null @@ -1,12 +0,0 @@ -[DEFAULT] -wrap_width = 79 -namespace = aodh -namespace = aodh-auth -namespace = oslo.db -namespace = oslo.log -namespace = oslo.messaging -namespace = oslo.middleware.cors -namespace = oslo.middleware.healthcheck -namespace = oslo.middleware.http_proxy_to_wsgi -namespace = oslo.policy -namespace = keystonemiddleware.auth_token diff --git a/aodh/cmd/storage.py b/aodh/cmd/storage.py deleted file mode 100644 index d736d78f..00000000 --- a/aodh/cmd/storage.py +++ /dev/null @@ -1,41 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright 2014 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_log import log - -from aodh import service -from aodh import storage - - -LOG = log.getLogger(__name__) - - -def dbsync(): - conf = service.prepare_service() - storage.get_connection_from_config(conf).upgrade() - - -def expirer(): - conf = service.prepare_service() - - if conf.database.alarm_history_time_to_live > 0: - LOG.debug("Clearing expired alarm history data") - storage_conn = storage.get_connection_from_config(conf) - storage_conn.clear_expired_alarm_history_data( - conf.database.alarm_history_time_to_live) - else: - LOG.info("Nothing to clean, database alarm history time to live " - "is disabled") diff --git a/aodh/conf/__init__.py b/aodh/conf/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/aodh/conf/defaults.py b/aodh/conf/defaults.py deleted file mode 100644 index 8750c9c1..00000000 --- a/aodh/conf/defaults.py +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright 2016 Hewlett Packard Enterprise Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_middleware import cors - - -def set_cors_middleware_defaults(): - """Update default configuration options for oslo.middleware.""" - cors.set_defaults( - allow_headers=['X-Auth-Token', - 'X-Openstack-Request-Id', - 'X-Subject-Token'], - expose_headers=['X-Auth-Token', - 'X-Openstack-Request-Id', - 'X-Subject-Token'], - allow_methods=['GET', - 'PUT', - 'POST', - 'DELETE', - 'PATCH'] - ) diff --git a/aodh/coordination.py b/aodh/coordination.py deleted file mode 100644 index 37a847d1..00000000 --- a/aodh/coordination.py +++ /dev/null @@ -1,246 +0,0 @@ -# -# Copyright 2014 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import bisect -import hashlib -import struct - -from oslo_config import cfg -from oslo_log import log -from oslo_utils import uuidutils -import six -import tenacity -import tooz.coordination - - -LOG = log.getLogger(__name__) - -OPTS = [ - cfg.StrOpt('backend_url', - help='The backend URL to use for distributed coordination. 
If ' - 'left empty, per-deployment central agent and per-host ' - 'compute agent won\'t do workload ' - 'partitioning and will only function correctly if a ' - 'single instance of that service is running.'), - cfg.FloatOpt('heartbeat', - default=1.0, - help='Number of seconds between heartbeats for distributed ' - 'coordination.'), - cfg.FloatOpt('check_watchers', - default=10.0, - help='Number of seconds between checks to see if group ' - 'membership has changed'), - cfg.IntOpt('retry_backoff', - default=1, - help='Retry backoff factor when retrying to connect with' - ' coordination backend'), - cfg.IntOpt('max_retry_interval', - default=30, - help='Maximum number of seconds between retry to join ' - 'partitioning group') -] - - -class ErrorJoiningPartitioningGroup(Exception): - def __init__(self): - super(ErrorJoiningPartitioningGroup, self).__init__(( - 'Error occurred when joining partitioning group')) - - -class MemberNotInGroupError(Exception): - def __init__(self, group_id, members, my_id): - super(MemberNotInGroupError, self).__init__(( - 'Group ID: %(group_id)s, Members: %(members)s, Me: %(me)s: ' - 'Current agent is not part of group and cannot take tasks') % - {'group_id': group_id, 'members': members, 'me': my_id}) - - -class HashRing(object): - - def __init__(self, nodes, replicas=100): - self._ring = dict() - self._sorted_keys = [] - - for node in nodes: - for r in six.moves.range(replicas): - hashed_key = self._hash('%s-%s' % (node, r)) - self._ring[hashed_key] = node - self._sorted_keys.append(hashed_key) - self._sorted_keys.sort() - - @staticmethod - def _hash(key): - return struct.unpack_from('>I', - hashlib.md5(str(key).encode()).digest())[0] - - def _get_position_on_ring(self, key): - hashed_key = self._hash(key) - position = bisect.bisect(self._sorted_keys, hashed_key) - return position if position < len(self._sorted_keys) else 0 - - def get_node(self, key): - if not self._ring: - return None - pos = self._get_position_on_ring(key) - return self._ring[self._sorted_keys[pos]] - - -class PartitionCoordinator(object): - """Workload partitioning coordinator. - - This class uses the `tooz` library to manage group membership. - - To ensure that the other agents know this agent is still alive, - the `heartbeat` method should be called periodically. - - Coordination errors and reconnects are handled under the hood, so the - service using the partition coordinator need not care whether the - coordination backend is down. The `extract_my_subset` will simply return an - empty iterable in this case. 
- """ - - def __init__(self, conf, my_id=None): - self.conf = conf - self.backend_url = self.conf.coordination.backend_url - self._coordinator = None - self._groups = set() - self._my_id = my_id or uuidutils.generate_uuid() - - def start(self): - if self.backend_url: - try: - self._coordinator = tooz.coordination.get_coordinator( - self.backend_url, self._my_id) - self._coordinator.start() - LOG.info('Coordination backend started successfully.') - except tooz.coordination.ToozError: - LOG.exception('Error connecting to coordination backend.') - - def stop(self): - if not self._coordinator: - return - - for group in list(self._groups): - self.leave_group(group) - - try: - self._coordinator.stop() - except tooz.coordination.ToozError: - LOG.exception('Error connecting to coordination backend.') - finally: - self._coordinator = None - - def is_active(self): - return self._coordinator is not None - - def heartbeat(self): - if self._coordinator: - if not self._coordinator.is_started: - # re-connect - self.start() - try: - self._coordinator.heartbeat() - except tooz.coordination.ToozError: - LOG.exception('Error sending a heartbeat to coordination ' - 'backend.') - - def join_group(self, group_id): - if (not self._coordinator or not self._coordinator.is_started - or not group_id): - return - - @tenacity.retry( - wait=tenacity.wait_exponential( - multiplier=self.conf.coordination.retry_backoff, - max=self.conf.coordination.max_retry_interval), - retry=tenacity.retry_if_exception_type( - ErrorJoiningPartitioningGroup)) - def _inner(): - try: - join_req = self._coordinator.join_group(group_id) - join_req.get() - LOG.info('Joined partitioning group %s', group_id) - except tooz.coordination.MemberAlreadyExist: - return - except tooz.coordination.GroupNotCreated: - create_grp_req = self._coordinator.create_group(group_id) - try: - create_grp_req.get() - except tooz.coordination.GroupAlreadyExist: - pass - raise ErrorJoiningPartitioningGroup() - except tooz.coordination.ToozError: - LOG.exception('Error joining partitioning group %s,' - ' re-trying', group_id) - raise ErrorJoiningPartitioningGroup() - self._groups.add(group_id) - - return _inner() - - def leave_group(self, group_id): - if group_id not in self._groups: - return - if self._coordinator: - self._coordinator.leave_group(group_id) - self._groups.remove(group_id) - LOG.info('Left partitioning group %s', group_id) - - def _get_members(self, group_id): - if not self._coordinator: - return [self._my_id] - - while True: - get_members_req = self._coordinator.get_members(group_id) - try: - return get_members_req.get() - except tooz.coordination.GroupNotCreated: - self.join_group(group_id) - - @tenacity.retry( - wait=tenacity.wait_random(max=2), - stop=tenacity.stop_after_attempt(5), - retry=tenacity.retry_if_exception_type(MemberNotInGroupError), - reraise=True) - def extract_my_subset(self, group_id, universal_set): - """Filters an iterable, returning only objects assigned to this agent. - - We have a list of objects and get a list of active group members from - `tooz`. We then hash all the objects into buckets and return only - the ones that hashed into *our* bucket. - """ - if not group_id: - return universal_set - if group_id not in self._groups: - self.join_group(group_id) - try: - members = self._get_members(group_id) - LOG.debug('Members of group: %s, Me: %s', members, self._my_id) - if self._my_id not in members: - LOG.warning('Cannot extract tasks because agent failed to ' - 'join group properly. 
Rejoining group.') - self.join_group(group_id) - members = self._get_members(group_id) - if self._my_id not in members: - raise MemberNotInGroupError(group_id, members, self._my_id) - LOG.debug('Members of group: %s, Me: %s', members, self._my_id) - hr = HashRing(members) - LOG.debug('Universal set: %s', universal_set) - my_subset = [v for v in universal_set - if hr.get_node(str(v)) == self._my_id] - LOG.debug('My subset: %s', my_subset) - return my_subset - except tooz.coordination.ToozError: - LOG.exception('Error getting group membership info from ' - 'coordination backend.') - return [] diff --git a/aodh/evaluator/__init__.py b/aodh/evaluator/__init__.py deleted file mode 100644 index f8f7798d..00000000 --- a/aodh/evaluator/__init__.py +++ /dev/null @@ -1,277 +0,0 @@ -# -# Copyright 2013-2015 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -import abc -import datetime -import json -import threading - -from concurrent import futures -import cotyledon -import croniter -from futurist import periodics -from oslo_config import cfg -from oslo_log import log -from oslo_utils import timeutils -from oslo_utils import uuidutils -import pytz -import six -from stevedore import extension - -import aodh -from aodh import coordination -from aodh import keystone_client -from aodh import messaging -from aodh import queue -from aodh import storage -from aodh.storage import models - -LOG = log.getLogger(__name__) - -UNKNOWN = 'insufficient data' -OK = 'ok' -ALARM = 'alarm' - - -OPTS = [ - cfg.BoolOpt('record_history', - default=True, - help='Record alarm change events.' 
- ), -] - - -@six.add_metaclass(abc.ABCMeta) -class Evaluator(object): - """Base class for alarm rule evaluator plugins.""" - - def __init__(self, conf): - self.conf = conf - self.notifier = queue.AlarmNotifier(self.conf) - self.storage_conn = None - self._ks_client = None - self._alarm_change_notifier = None - - @property - def ks_client(self): - if self._ks_client is None: - self._ks_client = keystone_client.get_client(self.conf) - return self._ks_client - - @property - def _storage_conn(self): - if not self.storage_conn: - self.storage_conn = storage.get_connection_from_config(self.conf) - return self.storage_conn - - @property - def alarm_change_notifier(self): - if not self._alarm_change_notifier: - transport = messaging.get_transport(self.conf) - self._alarm_change_notifier = messaging.get_notifier( - transport, publisher_id="aodh.evaluator") - return self._alarm_change_notifier - - def _record_change(self, alarm, reason): - if not self.conf.record_history: - return - type = models.AlarmChange.STATE_TRANSITION - detail = json.dumps({'state': alarm.state, - 'transition_reason': reason}) - user_id, project_id = self.ks_client.user_id, self.ks_client.project_id - on_behalf_of = alarm.project_id - now = timeutils.utcnow() - payload = dict(event_id=uuidutils.generate_uuid(), - alarm_id=alarm.alarm_id, - type=type, - detail=detail, - user_id=user_id, - project_id=project_id, - on_behalf_of=on_behalf_of, - timestamp=now) - - try: - self._storage_conn.record_alarm_change(payload) - except aodh.NotImplementedError: - pass - notification = "alarm.state_transition" - self.alarm_change_notifier.info({}, - notification, payload) - - def _refresh(self, alarm, state, reason, reason_data, always_record=False): - """Refresh alarm state.""" - try: - previous = alarm.state - alarm.state = state - alarm.state_reason = reason - if previous != state or always_record: - LOG.info('alarm %(id)s transitioning to %(state)s because ' - '%(reason)s', {'id': alarm.alarm_id, - 'state': state, - 'reason': reason}) - try: - self._storage_conn.update_alarm(alarm) - except storage.AlarmNotFound: - LOG.warning("Skip updating this alarm's state; the " - "alarm %s has been deleted", - alarm.alarm_id) - else: - self._record_change(alarm, reason) - self.notifier.notify(alarm, previous, reason, reason_data) - elif alarm.repeat_actions: - self.notifier.notify(alarm, previous, reason, reason_data) - except Exception: - # retry will occur naturally on the next evaluation - # cycle (unless alarm state reverts in the meantime) - LOG.exception('alarm state update failed') - - @classmethod - def within_time_constraint(cls, alarm): - """Check whether the alarm is within at least one of its time limits. - - If there are none, then the answer is yes. - """ - if not alarm.time_constraints: - return True - - now_utc = timeutils.utcnow().replace(tzinfo=pytz.utc) - for tc in alarm.time_constraints: - tz = pytz.timezone(tc['timezone']) if tc['timezone'] else None - now_tz = now_utc.astimezone(tz) if tz else now_utc - start_cron = croniter.croniter(tc['start'], now_tz) - if cls._is_exact_match(start_cron, now_tz): - return True - # start_cron.cur has changed in _is_exact_match(), - # croniter cannot recover properly in some corner cases.
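# (For intuition about the window check that follows: a time constraint
# such as {'start': '0 9 * * *', 'duration': 3600, 'timezone':
# 'Europe/Paris'} -- values illustrative -- matches any evaluation time
# between 09:00 and 10:00 Paris time, since the check reduces to:
#
#     now_tz = pytz.timezone('Europe/Paris').localize(
#         datetime.datetime(2017, 7, 13, 9, 30))
#     latest_start = croniter.croniter('0 9 * * *', now_tz).get_prev(
#         datetime.datetime)                       # 09:00 the same day
#     latest_start <= now_tz <= latest_start + datetime.timedelta(
#         seconds=3600)                            # -> True
# )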
- start_cron = croniter.croniter(tc['start'], now_tz) - latest_start = start_cron.get_prev(datetime.datetime) - duration = datetime.timedelta(seconds=tc['duration']) - if latest_start <= now_tz <= latest_start + duration: - return True - return False - - @staticmethod - def _is_exact_match(cron, ts): - """Handle the edge case when both parameters are equal. - - If the timestamp is the same as the cron point in time to the - minute, croniter returns the previous start, not the current one. - We check this by first going one step back and then one step - forward, and checking whether we are at the original point in time. - """ - cron.get_prev() - diff = (ts - cron.get_next(datetime.datetime)).total_seconds() - return abs(diff) < 60 # minute precision - - @abc.abstractmethod - def evaluate(self, alarm): - """Interface definition. - - Evaluate an alarm. - - :param alarm: an instance of the Alarm - """ - - -class AlarmEvaluationService(cotyledon.Service): - - PARTITIONING_GROUP_NAME = "alarm_evaluator" - EVALUATOR_EXTENSIONS_NAMESPACE = "aodh.evaluator" - - def __init__(self, worker_id, conf): - super(AlarmEvaluationService, self).__init__(worker_id) - self.conf = conf - - ef = lambda: futures.ThreadPoolExecutor(max_workers=10) - self.periodic = periodics.PeriodicWorker.create( - [], executor_factory=ef) - - self.evaluators = extension.ExtensionManager( - namespace=self.EVALUATOR_EXTENSIONS_NAMESPACE, - invoke_on_load=True, - invoke_args=(self.conf,) - ) - self.storage_conn = storage.get_connection_from_config(self.conf) - - self.partition_coordinator = coordination.PartitionCoordinator( - self.conf) - self.partition_coordinator.start() - self.partition_coordinator.join_group(self.PARTITIONING_GROUP_NAME) - - # allow time for coordination if necessary - delay_start = self.partition_coordinator.is_active() - - if self.evaluators: - @periodics.periodic(spacing=self.conf.evaluation_interval, - run_immediately=not delay_start) - def evaluate_alarms(): - self._evaluate_assigned_alarms() - - self.periodic.add(evaluate_alarms) - - if self.partition_coordinator.is_active(): - heartbeat_interval = min(self.conf.coordination.heartbeat, - self.conf.evaluation_interval / 4) - - @periodics.periodic(spacing=heartbeat_interval, - run_immediately=True) - def heartbeat(): - self.partition_coordinator.heartbeat() - - self.periodic.add(heartbeat) - - t = threading.Thread(target=self.periodic.start) - t.daemon = True - t.start() - - def terminate(self): - self.periodic.stop() - self.partition_coordinator.stop() - self.periodic.wait() - - def _evaluate_assigned_alarms(self): - try: - alarms = self._assigned_alarms() - LOG.info('initiating evaluation cycle on %d alarms', - len(alarms)) - for alarm in alarms: - self._evaluate_alarm(alarm) - except Exception: - LOG.exception('alarm evaluation cycle failed') - - def _evaluate_alarm(self, alarm): - """Evaluate a single alarm assigned to this evaluator.""" - if alarm.type not in self.evaluators: - LOG.debug('skipping alarm %s: type unsupported', alarm.alarm_id) - return - - LOG.debug('evaluating alarm %s', alarm.alarm_id) - try: - self.evaluators[alarm.type].obj.evaluate(alarm) - except Exception: - LOG.exception('Failed to evaluate alarm %s', alarm.alarm_id) - - def _assigned_alarms(self): - # NOTE(r-mibu): The 'event' type alarms will be evaluated by the - # event-driven alarm evaluator, so this periodic evaluator skips - # those alarms.
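# For example, an alarm created with type='event' never appears in this
# query; it is handled by EventAlarmEvaluationService through the
# notification listener instead (see aodh/event.py below).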
- all_alarms = self.storage_conn.get_alarms(enabled=True, - exclude=dict(type='event')) - all_alarms = list(all_alarms) - all_alarm_ids = [a.alarm_id for a in all_alarms] - selected = self.partition_coordinator.extract_my_subset( - self.PARTITIONING_GROUP_NAME, all_alarm_ids) - return list(filter(lambda a: a.alarm_id in selected, all_alarms)) diff --git a/aodh/evaluator/composite.py b/aodh/evaluator/composite.py deleted file mode 100644 index bf1de775..00000000 --- a/aodh/evaluator/composite.py +++ /dev/null @@ -1,245 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from oslo_log import log -import six -import stevedore - -from aodh import evaluator -from aodh.evaluator import threshold -from aodh.i18n import _ - -LOG = log.getLogger(__name__) - -STATE_CHANGE = {evaluator.ALARM: 'outside their threshold.', - evaluator.OK: 'inside their threshold.', - evaluator.UNKNOWN: 'state evaluated to unknown.'} - - -class RuleTarget(object): - - def __init__(self, rule, rule_evaluator, rule_name): - self.rule = rule - self.type = rule.get('type') - self.rule_evaluator = rule_evaluator - self.rule_name = rule_name - self.state = None - self.trending_state = None - self.statistics = None - self.evaluated = False - - def evaluate(self): - # Evaluate a sub-rule of composite rule - if not self.evaluated: - LOG.debug('Evaluating %(type)s rule: %(rule)s', - {'type': self.type, 'rule': self.rule}) - try: - self.state, self.trending_state, self.statistics, __, __ = \ - self.rule_evaluator.evaluate_rule(self.rule) - except threshold.InsufficientDataError as e: - self.state = evaluator.UNKNOWN - self.trending_state = None - self.statistics = e.statistics - self.evaluated = True - - -class RuleEvaluationBase(object): - def __init__(self, rule_target): - self.rule_target = rule_target - - def __str__(self): - return self.rule_target.rule_name - - -class OkEvaluation(RuleEvaluationBase): - - def __bool__(self): - self.rule_target.evaluate() - return self.rule_target.state == evaluator.OK - - __nonzero__ = __bool__ - - -class AlarmEvaluation(RuleEvaluationBase): - - def __bool__(self): - self.rule_target.evaluate() - return self.rule_target.state == evaluator.ALARM - - __nonzero__ = __bool__ - - -class AndOp(object): - def __init__(self, rule_targets): - self.rule_targets = rule_targets - - def __bool__(self): - return all(self.rule_targets) - - def __str__(self): - return '(' + ' and '.join(six.moves.map(str, self.rule_targets)) + ')' - - __nonzero__ = __bool__ - - -class OrOp(object): - def __init__(self, rule_targets): - self.rule_targets = rule_targets - - def __bool__(self): - return any(self.rule_targets) - - def __str__(self): - return '(' + ' or '.join(six.moves.map(str, self.rule_targets)) + ')' - - __nonzero__ = __bool__ - - -class CompositeEvaluator(evaluator.Evaluator): - def __init__(self, conf): - super(CompositeEvaluator, self).__init__(conf) - self.conf = conf - self._threshold_evaluators = None - self.rule_targets = [] - self.rule_name_prefix = 'rule' - self.rule_num = 0 - - @property - def 
threshold_evaluators(self): - if not self._threshold_evaluators: - threshold_types = ('threshold', 'gnocchi_resources_threshold', - 'gnocchi_aggregation_by_metrics_threshold', - 'gnocchi_aggregation_by_resources_threshold') - self._threshold_evaluators = stevedore.NamedExtensionManager( - 'aodh.evaluator', threshold_types, invoke_on_load=True, - invoke_args=(self.conf,)) - return self._threshold_evaluators - - def _parse_composite_rule(self, alarm_rule): - """Parse the composite rule. - - The composite rule is assembled from sub threshold rules with 'and' - and 'or'; the form can be nested. e.g. a composite rule can look - like this: - { - "and": [threshold_rule0, threshold_rule1, - {'or': [threshold_rule2, threshold_rule3, - threshold_rule4, threshold_rule5]}] - } - """ - if (isinstance(alarm_rule, dict) and len(alarm_rule) == 1 - and list(alarm_rule)[0] in ('and', 'or')): - and_or_key = list(alarm_rule)[0] - if and_or_key == 'and': - rules = (self._parse_composite_rule(r) for r in - alarm_rule['and']) - rules_alarm, rules_ok = zip(*rules) - return AndOp(rules_alarm), OrOp(rules_ok) - else: - rules = (self._parse_composite_rule(r) for r in - alarm_rule['or']) - rules_alarm, rules_ok = zip(*rules) - return OrOp(rules_alarm), AndOp(rules_ok) - else: - rule_evaluator = self.threshold_evaluators[alarm_rule['type']].obj - self.rule_num += 1 - name = self.rule_name_prefix + str(self.rule_num) - rule = RuleTarget(alarm_rule, rule_evaluator, name) - self.rule_targets.append(rule) - return AlarmEvaluation(rule), OkEvaluation(rule) - - def _reason(self, alarm, new_state, rule_target_alarm): - transition = alarm.state != new_state - reason_data = { - 'type': 'composite', - 'composition_form': str(rule_target_alarm)} - root_cause_rules = {} - for rule in self.rule_targets: - if rule.state == new_state: - root_cause_rules.update({rule.rule_name: rule.rule}) - reason_data.update(causative_rules=root_cause_rules) - params = {'state': new_state, - 'expression': str(rule_target_alarm), - 'rules': ', '.join(sorted(root_cause_rules)), - 'description': STATE_CHANGE[new_state]} - if transition: - reason = (_('Composite rule alarm with composition form: ' - '%(expression)s transition to %(state)s, due to ' - 'rules: %(rules)s %(description)s') % params) - - else: - reason = (_('Composite rule alarm with composition form: ' - '%(expression)s remaining as %(state)s, due to ' - 'rules: %(rules)s %(description)s') % params) - - return reason, reason_data - - def _evaluate_sufficient(self, alarm, rule_target_alarm, rule_target_ok): - # Some of the evaluated rules may be in unknown or trending states.
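# For example, a sub-rule trending toward 'alarm' (only the newest
# datapoint crossed the threshold) still counts as 'ok' here unless the
# alarm as a whole is currently 'unknown', in which case the trending
# state is adopted directly.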
- for rule in self.rule_targets: - if rule.trending_state is not None: - if alarm.state == evaluator.UNKNOWN: - rule.state = rule.trending_state - elif rule.trending_state == evaluator.ALARM: - rule.state = evaluator.OK - elif rule.trending_state == evaluator.OK: - rule.state = evaluator.ALARM - else: - rule.state = alarm.state - - alarm_triggered = bool(rule_target_alarm) - if alarm_triggered: - reason, reason_data = self._reason(alarm, evaluator.ALARM, - rule_target_alarm) - self._refresh(alarm, evaluator.ALARM, reason, reason_data) - return True - - ok_result = bool(rule_target_ok) - if ok_result: - reason, reason_data = self._reason(alarm, evaluator.OK, - rule_target_alarm) - self._refresh(alarm, evaluator.OK, reason, reason_data) - return True - return False - - def evaluate(self, alarm): - if not self.within_time_constraint(alarm): - LOG.debug('Attempted to evaluate alarm %s, but it is not ' - 'within its time constraint.', alarm.alarm_id) - return - - LOG.debug("Evaluating composite rule alarm %s ...", alarm.alarm_id) - self.rule_targets = [] - self.rule_num = 0 - rule_target_alarm, rule_target_ok = self._parse_composite_rule( - alarm.rule) - - sufficient = self._evaluate_sufficient(alarm, rule_target_alarm, - rule_target_ok) - if not sufficient: - for rule in self.rule_targets: - rule.evaluate() - sufficient = self._evaluate_sufficient(alarm, rule_target_alarm, - rule_target_ok) - - if not sufficient: - # The remaining situations evaluate to unknown, for example: - # 1. 'unknown' and 'alarm' - # 2. 'unknown' or 'ok' - reason, reason_data = self._reason(alarm, evaluator.UNKNOWN, - rule_target_alarm) - if alarm.state != evaluator.UNKNOWN: - self._refresh(alarm, evaluator.UNKNOWN, reason, reason_data) - else: - LOG.debug(reason) diff --git a/aodh/evaluator/event.py b/aodh/evaluator/event.py deleted file mode 100644 index 1be168a7..00000000 --- a/aodh/evaluator/event.py +++ /dev/null @@ -1,275 +0,0 @@ -# -# Copyright 2015 NEC Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import operator - -from oslo_config import cfg -from oslo_log import log -from oslo_serialization import jsonutils -from oslo_utils import fnmatch -from oslo_utils import timeutils -import six - -from aodh import evaluator -from aodh.i18n import _ - -LOG = log.getLogger(__name__) - -COMPARATORS = { - 'gt': operator.gt, - 'lt': operator.lt, - 'ge': operator.ge, - 'le': operator.le, - 'eq': operator.eq, - 'ne': operator.ne, -} - -OPTS = [ - cfg.IntOpt('event_alarm_cache_ttl', - default=60, - help='TTL of event alarm caches, in seconds.
' - 'Set to 0 to disable caching.'), -] - - -def _sanitize_trait_value(value, trait_type): - if trait_type in (2, 'integer'): - return int(value) - elif trait_type in (3, 'float'): - return float(value) - elif trait_type in (4, 'datetime'): - return timeutils.normalize_time(timeutils.parse_isotime(value)) - else: - return six.text_type(value) - - -class InvalidEvent(Exception): - """Error raised when the received event is missing mandatory fields.""" - - -class Event(object): - """Wrapped event object to hold converted values for this evaluator.""" - - TRAIT_FIELD = 0 - TRAIT_TYPE = 1 - TRAIT_VALUE = 2 - - def __init__(self, event): - self.obj = event - self._validate() - self.id = event.get('message_id') - self._parse_traits() - - def _validate(self): - """Validate that the received event has the mandatory parameters.""" - - if not self.obj: - LOG.error('Received invalid event (empty or None)') - raise InvalidEvent() - - if not self.obj.get('event_type'): - LOG.error('Failed to extract event_type from event = %s', - self.obj) - raise InvalidEvent() - - if not self.obj.get('message_id'): - LOG.error('Failed to extract message_id from event = %s', - self.obj) - raise InvalidEvent() - - def _parse_traits(self): - self.traits = {} - self.project = '' - for t in self.obj.get('traits', []): - k = t[self.TRAIT_FIELD] - v = _sanitize_trait_value(t[self.TRAIT_VALUE], t[self.TRAIT_TYPE]) - self.traits[k] = v - if k in ('tenant_id', 'project_id'): - self.project = v - - def get_value(self, field): - if field.startswith('traits.'): - key = field.split('.', 1)[-1] - return self.traits.get(key) - - v = self.obj - for f in field.split('.'): - if hasattr(v, 'get'): - v = v.get(f) - else: - return None - return v - - -class Alarm(object): - """Wrapped alarm object to hold converted values for this evaluator.""" - - TRAIT_TYPES = { - 'none': 0, - 'string': 1, - 'integer': 2, - 'float': 3, - 'datetime': 4, - } - - def __init__(self, alarm): - self.obj = alarm - self.id = alarm.alarm_id - self._parse_query() - - def _parse_query(self): - self.query = [] - for q in self.obj.rule.get('query', []): - if not q['field'].startswith('traits.'): - self.query.append(q) - continue - type_num = self.TRAIT_TYPES[q.get('type') or 'string'] - field = q['field'] - value = _sanitize_trait_value(q.get('value'), type_num) - op = COMPARATORS[q.get('op', 'eq')] - self.query.append({'field': field, 'value': value, 'op': op}) - - def fired_and_no_repeat(self): - return (not self.obj.repeat_actions and - self.obj.state == evaluator.ALARM) - - def event_type_to_watch(self, event_type): - return fnmatch.fnmatch(event_type, self.obj.rule['event_type']) - - -class EventAlarmEvaluator(evaluator.Evaluator): - - def __init__(self, conf): - super(EventAlarmEvaluator, self).__init__(conf) - self.caches = {} - - def evaluate_events(self, events): - """Evaluate the events by referring to related alarms.""" - - if not isinstance(events, list): - events = [events] - - LOG.debug('Starting event alarm evaluation: #events = %d', - len(events)) - for e in events: - LOG.debug('Evaluating event: event = %s', e) - try: - event = Event(e) - except InvalidEvent: - LOG.warning('Event <%s> is invalid, aborting evaluation ' - 'for it.', e) - continue - - for id, alarm in six.iteritems( - self._get_project_alarms(event.project)): - try: - self._evaluate_alarm(alarm, event) - except Exception: - LOG.exception('Failed to evaluate alarm (id=%(a)s) ' - 'triggered by event = %(e)s.', - {'a': id, 'e': e}) - - LOG.debug('Finished event alarm evaluation.') - - def
_get_project_alarms(self, project): - if self.conf.event_alarm_cache_ttl and project in self.caches: - if timeutils.is_older_than(self.caches[project]['updated'], - self.conf.event_alarm_cache_ttl): - del self.caches[project] - else: - return self.caches[project]['alarms'] - - # TODO(r-mibu): Implement "changes-since" at the storage API and make - # this function update only the alarms changed since the last access. - alarms = {a.alarm_id: Alarm(a) for a in - self._storage_conn.get_alarms(enabled=True, - alarm_type='event', - project=project)} - - if self.conf.event_alarm_cache_ttl: - self.caches[project] = { - 'alarms': alarms, - 'updated': timeutils.utcnow() - } - - return alarms - - def _evaluate_alarm(self, alarm, event): - """Evaluate the alarm by referring to the received event. - - This function compares each condition of the alarm on the assumption - that all conditions are combined by the AND operator. - When the received event meets the conditions defined in the alarm's - 'event_type' and 'query', the alarm will be fired and updated to - state='alarm' (alarmed). - Note: this evaluator never changes the alarm to state='ok' - or state='insufficient data'. - """ - - LOG.debug('Evaluating alarm (id=%(a)s) triggered by event ' - '(message_id=%(e)s).', {'a': alarm.id, 'e': event.id}) - - if alarm.fired_and_no_repeat(): - LOG.debug('Skip evaluation of the alarm id=%s which has already ' - 'fired.', alarm.id) - return - - if not alarm.event_type_to_watch(event.obj['event_type']): - LOG.debug('Aborting evaluation of the alarm (id=%s) since ' - 'its event_type does not match.', alarm.id) - return - - def _compare(condition): - v = event.get_value(condition['field']) - LOG.debug('Comparing value=%(v)s against condition=%(c)s .', - {'v': v, 'c': condition}) - return condition['op'](v, condition['value']) - - for condition in alarm.query: - if not _compare(condition): - LOG.debug('Aborting evaluation of the alarm due to ' - 'unmet condition=%s .', condition) - return - - self._fire_alarm(alarm, event) - - def _fire_alarm(self, alarm, event): - """Update alarm state and fire alarm via alarm notifier.""" - - state = evaluator.ALARM - reason = (_('Event <id=%(id)s,event_type=%(event_type)s> hits the ' - 'query <query=%(alarm_query)s>.') % - {'id': event.id, - 'event_type': event.get_value('event_type'), - 'alarm_query': jsonutils.dumps(alarm.obj.rule['query'], - sort_keys=True)}) - reason_data = {'type': 'event', 'event': event.obj} - always_record = alarm.obj.repeat_actions - self._refresh(alarm.obj, state, reason, reason_data, always_record) - - def _refresh(self, alarm, state, reason, reason_data, always_record): - super(EventAlarmEvaluator, self)._refresh(alarm, state, - reason, reason_data, - always_record) - - project = alarm.project_id - if self.conf.event_alarm_cache_ttl and project in self.caches: - self.caches[project]['alarms'][alarm.alarm_id].obj.state = state - - # NOTE(r-mibu): This method won't be used, but we have to define it here - # in order to override the abstract method in the super class. - # TODO(r-mibu): Change the base (common) class design for evaluators. - def evaluate(self, alarm): - pass diff --git a/aodh/evaluator/gnocchi.py b/aodh/evaluator/gnocchi.py deleted file mode 100644 index 39bcbf09..00000000 --- a/aodh/evaluator/gnocchi.py +++ /dev/null @@ -1,159 +0,0 @@ -# -# Copyright 2015 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License.
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from gnocchiclient import client -from gnocchiclient import exceptions -from oslo_log import log -from oslo_serialization import jsonutils - -from aodh.evaluator import threshold -from aodh import keystone_client - -LOG = log.getLogger(__name__) - -# The list of points that the Gnocchi API returns is composed -# of tuples of (timestamp, granularity, value). -GRANULARITY = 1 -VALUE = 2 - - -class GnocchiBase(threshold.ThresholdEvaluator): - def __init__(self, conf): - super(GnocchiBase, self).__init__(conf) - self._gnocchi_client = client.Client( - '1', keystone_client.get_session(conf), - interface=conf.service_credentials.interface, - region_name=conf.service_credentials.region_name) - - @staticmethod - def _sanitize(rule, statistics): - """Return the datapoints that correspond to the alarm granularity.""" - # TODO(sileht): if there's no direct match, but there is an archive - # policy with granularity that's an even divisor of the period, - # we could potentially do a mean-of-means (or max-of-maxes or whatever, - # but not a stddev-of-stddevs). - # TODO(sileht): support alarm['exclude_outliers'] - LOG.debug('sanitize stats %s', statistics) - statistics = [stats[VALUE] for stats in statistics - if stats[GRANULARITY] == rule['granularity']] - if not statistics: - raise threshold.InsufficientDataError( - "No datapoint for granularity %s" % rule['granularity'], []) - statistics = statistics[-rule['evaluation_periods']:] - LOG.debug('pruned statistics to %d', len(statistics)) - return statistics - - -class GnocchiResourceThresholdEvaluator(GnocchiBase): - def _statistics(self, rule, start, end): - try: - return self._gnocchi_client.metric.get_measures( - metric=rule['metric'], - start=start, stop=end, - resource_id=rule['resource_id'], - aggregation=rule['aggregation_method']) - except exceptions.MetricNotFound: - raise threshold.InsufficientDataError( - 'metric %s for resource %s does not exist' % - (rule['metric'], rule['resource_id']), []) - except exceptions.ResourceNotFound: - raise threshold.InsufficientDataError( - 'resource %s does not exist' % rule['resource_id'], []) - except exceptions.NotFound: - # TODO(sileht): gnocchiclient should raise an explicit - # exception for AggregationNotFound; this API endpoint - # can only raise 3 different 404s, so we are safe to - # assume this is an AggregationNotFound for now. - raise threshold.InsufficientDataError( - 'aggregation %s does not exist for ' 'metric %s of resource %s' % (rule['aggregation_method'], - rule['metric'], - rule['resource_id']), - []) - except Exception as e: - msg = 'alarm statistics retrieval failed: %s' % e - LOG.warning(msg) - raise threshold.InsufficientDataError(msg, []) - - -class GnocchiAggregationMetricsThresholdEvaluator(GnocchiBase): - def _statistics(self, rule, start, end): - try: - # FIXME(sileht): If a heat autoscaling stack decides to - # delete an instance, the gnocchi metrics associated with this - # instance will no longer be updated, and when the alarm asks - # for the aggregation, gnocchi will raise a 'No overlap' - # exception.
- # So temporarily set 'needed_overlap' to 0 to disable the - # gnocchi checks about missing points. For more detail see: - # https://bugs.launchpad.net/gnocchi/+bug/1479429 - return self._gnocchi_client.metric.aggregation( - metrics=rule['metrics'], - start=start, stop=end, - aggregation=rule['aggregation_method'], - needed_overlap=0) - except exceptions.MetricNotFound: - raise threshold.InsufficientDataError( - 'At least one of the metrics in %s does not exist' % - rule['metrics'], []) - except exceptions.NotFound: - # TODO(sileht): gnocchiclient should raise an explicit - # exception for AggregationNotFound; this API endpoint - # can only raise 3 different 404s, so we are safe to - # assume this is an AggregationNotFound for now. - raise threshold.InsufficientDataError( - 'aggregation %s does not exist for at least one ' - 'metric in %s' % (rule['aggregation_method'], - rule['metrics']), []) - except Exception as e: - msg = 'alarm statistics retrieval failed: %s' % e - LOG.warning(msg) - raise threshold.InsufficientDataError(msg, []) - - -class GnocchiAggregationResourcesThresholdEvaluator(GnocchiBase): - def _statistics(self, rule, start, end): - # FIXME(sileht): If a heat autoscaling stack decides to - # delete an instance, the gnocchi metrics associated with this - # instance will no longer be updated, and when the alarm asks - # for the aggregation, gnocchi will raise a 'No overlap' - # exception. - # So temporarily set 'needed_overlap' to 0 to disable the - # gnocchi checks about missing points. For more detail see: - # https://bugs.launchpad.net/gnocchi/+bug/1479429 - try: - return self._gnocchi_client.metric.aggregation( - metrics=rule['metric'], - query=jsonutils.loads(rule['query']), - resource_type=rule["resource_type"], - start=start, stop=end, - aggregation=rule['aggregation_method'], - needed_overlap=0, - ) - except exceptions.MetricNotFound: - raise threshold.InsufficientDataError( - 'metric %s does not exist' % rule['metric'], []) - except exceptions.NotFound: - # TODO(sileht): gnocchiclient should raise an explicit - # exception for AggregationNotFound; this API endpoint - # can only raise 3 different 404s, so we are safe to - # assume this is an AggregationNotFound for now. - raise threshold.InsufficientDataError( - 'aggregation %s does not exist for at least one ' - 'metric of the query' % rule['aggregation_method'], []) - except Exception as e: - msg = 'alarm statistics retrieval failed: %s' % e - LOG.warning(msg) - raise threshold.InsufficientDataError(msg, []) diff --git a/aodh/evaluator/threshold.py b/aodh/evaluator/threshold.py deleted file mode 100644 index 884714b1..00000000 --- a/aodh/evaluator/threshold.py +++ /dev/null @@ -1,247 +0,0 @@ -# -# Copyright 2013-2015 Red Hat, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
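# A sizing sketch for the query window computed in _bound_duration below,
# with illustrative values (period=60s, evaluation_periods=3, look_back=1,
# additional_ingestion_lag=0):
#
#     window = 60 * (3 + 1) + 0                   # 240 seconds
#     start = utcnow() - timedelta(seconds=window)
#
# i.e. one extra period is queried beyond the periods being evaluated, to
# absorb reporting/ingestion lag.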
- -import copy -import datetime -import operator -import six - -from ceilometerclient import client as ceiloclient -from ceilometerclient import exc as ceiloexc -from oslo_config import cfg -from oslo_log import log -from oslo_utils import timeutils - -from aodh import evaluator -from aodh.evaluator import utils -from aodh.i18n import _ -from aodh import keystone_client - -LOG = log.getLogger(__name__) - -COMPARATORS = { - 'gt': operator.gt, - 'lt': operator.lt, - 'ge': operator.ge, - 'le': operator.le, - 'eq': operator.eq, - 'ne': operator.ne, -} - -OPTS = [ - cfg.IntOpt('additional_ingestion_lag', - min=0, - default=0, - help='The number of seconds to extend the evaluation windows ' - 'to compensate for the reporting/ingestion lag.') -] - - -class InsufficientDataError(Exception): - def __init__(self, reason, statistics): - self.reason = reason - self.statistics = statistics - super(InsufficientDataError, self).__init__(reason) - - -class ThresholdEvaluator(evaluator.Evaluator): - - # the sliding evaluation window is extended to allow for - # the reporting/ingestion lag; this can be increased - # with 'additional_ingestion_lag' seconds if needed. - look_back = 1 - - def __init__(self, conf): - super(ThresholdEvaluator, self).__init__(conf) - self._cm_client = None - - @property - def cm_client(self): - if self._cm_client is None: - auth_config = self.conf.service_credentials - self._cm_client = ceiloclient.get_client( - version=2, - session=keystone_client.get_session(self.conf), - # ceiloclient adapter options - region_name=auth_config.region_name, - interface=auth_config.interface, - ) - - return self._cm_client - - def _bound_duration(self, rule): - """Bound the duration of the statistics query.""" - now = timeutils.utcnow() - # when exclusion of weak datapoints is enabled, we extend - # the look-back period so as to allow a clearer sample count - # trend to be established - look_back = (self.look_back if not rule.get('exclude_outliers') - else rule['evaluation_periods']) - window = ((rule.get('period', None) or rule['granularity']) - * (rule['evaluation_periods'] + look_back) + - self.conf.additional_ingestion_lag) - start = now - datetime.timedelta(seconds=window) - LOG.debug('query stats from %(start)s to ' - '%(now)s', {'start': start, 'now': now}) - return start.isoformat(), now.isoformat() - - @staticmethod - def _sanitize(rule, statistics): - """Sanitize statistics.""" - LOG.debug('sanitize stats %s', statistics) - if rule.get('exclude_outliers'): - key = operator.attrgetter('count') - mean = utils.mean(statistics, key) - stddev = utils.stddev(statistics, key, mean) - lower = mean - 2 * stddev - upper = mean + 2 * stddev - inliers, outliers = utils.anomalies(statistics, key, lower, upper) - if outliers: - LOG.debug('excluded weak datapoints with sample counts %s', - [s.count for s in outliers]) - statistics = inliers - else: - LOG.debug('no excluded weak datapoints') - - # in practice statistics are always sorted by period start, though - # this is not strictly required by the API - statistics = statistics[-rule['evaluation_periods']:] - result_statistics = [getattr(stat, rule['statistic']) - for stat in statistics] - LOG.debug('pruned statistics to %d', len(statistics)) - return result_statistics - - def _statistics(self, rule, start, end): - """Retrieve statistics over the current window.""" - after = dict(field='timestamp', op='ge', value=start) - before = dict(field='timestamp', op='le', value=end) - query = copy.copy(rule['query']) - query.extend([before, after]) - LOG.debug('stats query
%s', query) - try: - return self.cm_client.statistics.list( - meter_name=rule['meter_name'], q=query, - period=rule['period']) - except Exception as e: - if isinstance(e, ceiloexc.HTTPException) and e.code == 410: - LOG.warning("This telemetry installation is not configured to " - "support alarms of type 'threshold'; they should " - "be disabled or removed.") - else: - LOG.exception(_('alarm stats retrieval failed')) - return [] - - @staticmethod - def _reason_data(disposition, count, most_recent): - """Create a reason data dictionary for this evaluator type.""" - return {'type': 'threshold', 'disposition': disposition, - 'count': count, 'most_recent': most_recent} - - @classmethod - def _reason(cls, alarm, statistics, state, count): - """Fabricate reason string.""" - if state == evaluator.OK: - disposition = 'inside' - count = len(statistics) - count - else: - disposition = 'outside' - last = statistics[-1] if statistics else None - transition = alarm.state != state - reason_data = cls._reason_data(disposition, count, last) - if transition: - return ('Transition to %(state)s due to %(count)d samples' - ' %(disposition)s threshold, most recent:' - ' %(most_recent)s' % dict(reason_data, state=state), - reason_data) - return ('Remaining as %(state)s due to %(count)d samples' - ' %(disposition)s threshold, most recent: %(most_recent)s' - % dict(reason_data, state=state), reason_data) - - def evaluate_rule(self, alarm_rule): - """Evaluate alarm rule. - - :returns: a (state, trending_state, statistics, outside_count, - unknown_reason) tuple. - """ - start, end = self._bound_duration(alarm_rule) - statistics = self._statistics(alarm_rule, start, end) - statistics = self._sanitize(alarm_rule, statistics) - sufficient = len(statistics) >= alarm_rule['evaluation_periods'] - if not sufficient: - raise InsufficientDataError( - '%d datapoints are unknown' % alarm_rule['evaluation_periods'], - statistics) - - def _compare(value): - op = COMPARATORS[alarm_rule['comparison_operator']] - limit = alarm_rule['threshold'] - LOG.debug('comparing value %(value)s against threshold' - ' %(limit)s', {'value': value, 'limit': limit}) - return op(value, limit) - - compared = list(six.moves.map(_compare, statistics)) - distilled = all(compared) - unequivocal = distilled or not any(compared) - number_outside = len([c for c in compared if c]) - - if unequivocal: - state = evaluator.ALARM if distilled else evaluator.OK - return state, None, statistics, number_outside, None - else: - trending_state = evaluator.ALARM if compared[-1] else evaluator.OK - return None, trending_state, statistics, number_outside, None - - def _transition_alarm(self, alarm, state, trending_state, statistics, - outside_count, unknown_reason): - unknown = alarm.state == evaluator.UNKNOWN - continuous = alarm.repeat_actions - - if trending_state: - if unknown or continuous: - state = trending_state if unknown else alarm.state - reason, reason_data = self._reason(alarm, statistics, state, - outside_count) - self._refresh(alarm, state, reason, reason_data) - return - - if state == evaluator.UNKNOWN and not unknown: - LOG.warning('Expecting %(expected)d datapoints but only got ' - '%(actual)d' - % {'expected': alarm.rule['evaluation_periods'], - 'actual': len(statistics)}) - # The reason is not the same as the log message because we want - # to keep the old format consistent, since third-party software - # may depend on it.
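# For illustration, with evaluation_periods=3 and no datapoints at all,
# the reason data recorded below would be:
#
#     {'type': 'threshold', 'disposition': 'unknown',
#      'count': 3, 'most_recent': None}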
- last = None if not statistics else statistics[-1] - reason_data = self._reason_data('unknown', - alarm.rule['evaluation_periods'], - last) - self._refresh(alarm, state, unknown_reason, reason_data) - - elif state and (alarm.state != state or continuous): - reason, reason_data = self._reason(alarm, statistics, state, - outside_count) - self._refresh(alarm, state, reason, reason_data) - - def evaluate(self, alarm): - if not self.within_time_constraint(alarm): - LOG.debug('Attempted to evaluate alarm %s, but it is not ' - 'within its time constraint.', alarm.alarm_id) - return - - try: - evaluation = self.evaluate_rule(alarm.rule) - except InsufficientDataError as e: - evaluation = (evaluator.UNKNOWN, None, e.statistics, 0, - e.reason) - self._transition_alarm(alarm, *evaluation) diff --git a/aodh/evaluator/utils.py b/aodh/evaluator/utils.py deleted file mode 100644 index 07c16bdb..00000000 --- a/aodh/evaluator/utils.py +++ /dev/null @@ -1,58 +0,0 @@ -# -# Copyright 2014 Red Hat, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import math - - -def mean(s, key=lambda x: x): - """Calculate the mean of a numeric list.""" - count = float(len(s)) - if count: - return math.fsum(map(key, s)) / count - return 0.0 - - -def deltas(s, key, m=None): - """Calculate the squared distances from mean for a numeric list.""" - m = m or mean(s, key) - return [(key(i) - m) ** 2 for i in s] - - -def variance(s, key, m=None): - """Calculate the variance of a numeric list.""" - return mean(deltas(s, key, m)) - - -def stddev(s, key, m=None): - """Calculate the standard deviation of a numeric list.""" - return math.sqrt(variance(s, key, m)) - - -def outside(s, key, lower=0.0, upper=0.0): - """Determine if value falls outside upper and lower bounds.""" - v = key(s) - return v < lower or v > upper - - -def anomalies(s, key, lower=0.0, upper=0.0): - """Separate anomalous data points from the in-liers.""" - inliers = [] - outliers = [] - for i in s: - if outside(i, key, lower, upper): - outliers.append(i) - else: - inliers.append(i) - return inliers, outliers diff --git a/aodh/event.py b/aodh/event.py deleted file mode 100644 index 0ad94bdb..00000000 --- a/aodh/event.py +++ /dev/null @@ -1,70 +0,0 @@ -# -# Copyright 2015 NEC Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
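# An illustrative [listener] section for the options declared below (the
# values are examples, not necessarily the defaults):
#
#     [listener]
#     event_alarm_topic = alarm.all
#     batch_size = 10
#     batch_timeout = 5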
- -import cotyledon -from oslo_config import cfg -from oslo_log import log -import oslo_messaging - -from aodh.evaluator import event -from aodh import messaging -from aodh import storage - -LOG = log.getLogger(__name__) - -OPTS = [ - cfg.StrOpt('event_alarm_topic', - default='alarm.all', - deprecated_group='DEFAULT', - help='The topic that aodh uses for event alarm evaluation.'), - cfg.IntOpt('batch_size', - default=1, - help='Number of notification messages to wait before ' - 'dispatching them.'), - cfg.IntOpt('batch_timeout', - help='Number of seconds to wait before dispatching samples ' - 'when batch_size is not reached (None means indefinitely).'), -] - - -class EventAlarmEndpoint(object): - - def __init__(self, evaluator): - self.evaluator = evaluator - - def sample(self, notifications): - LOG.debug('Received %s messages in batch.', len(notifications)) - for notification in notifications: - self.evaluator.evaluate_events(notification['payload']) - - -class EventAlarmEvaluationService(cotyledon.Service): - def __init__(self, worker_id, conf): - super(EventAlarmEvaluationService, self).__init__(worker_id) - self.conf = conf - self.storage_conn = storage.get_connection_from_config(self.conf) - self.evaluator = event.EventAlarmEvaluator(self.conf) - self.listener = messaging.get_batch_notification_listener( - messaging.get_transport(self.conf), - [oslo_messaging.Target( - topic=self.conf.listener.event_alarm_topic)], - [EventAlarmEndpoint(self.evaluator)], False, - self.conf.listener.batch_size, - self.conf.listener.batch_timeout) - self.listener.start() - - def terminate(self): - self.listener.stop() - self.listener.wait() diff --git a/aodh/i18n.py b/aodh/i18n.py deleted file mode 100644 index 63584e09..00000000 --- a/aodh/i18n.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright 2014 Huawei Technologies Co., Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""oslo.i18n integration module. - -See https://docs.openstack.org/oslo.i18n/latest/user/usage.html - -""" - -import oslo_i18n - -DOMAIN = 'aodh' - -_translators = oslo_i18n.TranslatorFactory(domain=DOMAIN) - -# The primary translation function using the well-known name "_" -_ = _translators.primary - - -def translate(value, user_locale): - return oslo_i18n.translate(value, user_locale) - - -def get_available_languages(): - return oslo_i18n.get_available_languages(DOMAIN) diff --git a/aodh/keystone_client.py b/aodh/keystone_client.py deleted file mode 100644 index 86fec643..00000000 --- a/aodh/keystone_client.py +++ /dev/null @@ -1,115 +0,0 @@ -# -# Copyright 2015 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import os
-
-from keystoneauth1 import exceptions as ka_exception
-from keystoneauth1.identity.generic import password
-from keystoneauth1 import loading as ka_loading
-from keystoneauth1 import session
-from keystoneclient.v3 import client as ks_client_v3
-from oslo_config import cfg
-
-
-CFG_GROUP = "service_credentials"
-
-
-def get_session(conf):
-    """Get an aodh service credentials auth session."""
-    auth_plugin = ka_loading.load_auth_from_conf_options(conf, CFG_GROUP)
-    return ka_loading.load_session_from_conf_options(
-        conf, CFG_GROUP, auth=auth_plugin
-    )
-
-
-def get_client(conf):
-    """Return a client for keystone v3 endpoint."""
-    sess = get_session(conf)
-    return ks_client_v3.Client(session=sess)
-
-
-def get_trusted_client(conf, trust_id):
-    # Ideally we would use load_session_from_conf_options, but we can't do that
-    # *and* specify a trust, so let's create the object manually.
-    auth_plugin = password.Password(
-        username=conf[CFG_GROUP].username,
-        password=conf[CFG_GROUP].password,
-        auth_url=conf[CFG_GROUP].auth_url,
-        user_domain_id=conf[CFG_GROUP].user_domain_id,
-        trust_id=trust_id)
-
-    sess = session.Session(auth=auth_plugin)
-    return ks_client_v3.Client(session=sess)
-
-
-def get_auth_token(client):
-    return client.session.auth.get_access(client.session).auth_token
-
-
-def get_client_on_behalf_user(auth_plugin):
-    """Return a client for keystone v3 endpoint."""
-    sess = session.Session(auth=auth_plugin)
-    return ks_client_v3.Client(session=sess)
-
-
-def create_trust_id(conf, trustor_user_id, trustor_project_id, roles,
-                    auth_plugin):
-    """Create a new trust using the aodh service user."""
-    admin_client = get_client(conf)
-    trustee_user_id = admin_client.session.get_user_id()
-
-    client = get_client_on_behalf_user(auth_plugin)
-    trust = client.trusts.create(trustor_user=trustor_user_id,
-                                 trustee_user=trustee_user_id,
-                                 project=trustor_project_id,
-                                 impersonation=True,
-                                 role_names=roles)
-    return trust.id
-
-
-def delete_trust_id(trust_id, auth_plugin):
-    """Delete a trust previously setup for the aodh user."""
-    client = get_client_on_behalf_user(auth_plugin)
-    try:
-        client.trusts.delete(trust_id)
-    except ka_exception.NotFound:
-        pass
-
-
-OPTS = [
-    cfg.StrOpt('region-name',
-               default=os.environ.get('OS_REGION_NAME'),
-               deprecated_name="os-region-name",
-               help='Region name to use for OpenStack service endpoints.'),
-    cfg.StrOpt('interface',
-               default=os.environ.get(
-                   'OS_INTERFACE', os.environ.get('OS_ENDPOINT_TYPE',
-                                                  'public')),
-               deprecated_name="os-endpoint-type",
-               choices=('public', 'internal', 'admin', 'auth', 'publicURL',
-                        'internalURL', 'adminURL'),
-               help='Type of endpoint in Identity service catalog to use for '
-                    'communication with OpenStack services.'),
-]
-
-
-def register_keystoneauth_opts(conf):
-    ka_loading.register_auth_conf_options(conf, CFG_GROUP)
-    ka_loading.register_session_conf_options(
-        conf, CFG_GROUP,
-        deprecated_opts={'cacert': [
-            cfg.DeprecatedOpt('os-cacert', group=CFG_GROUP),
-            cfg.DeprecatedOpt('os-cacert', group="DEFAULT")]
-        })
diff --git a/aodh/locale/de/LC_MESSAGES/aodh.po b/aodh/locale/de/LC_MESSAGES/aodh.po
deleted file mode 100644
index 9427cdc6..00000000
--- a/aodh/locale/de/LC_MESSAGES/aodh.po
+++ /dev/null
@@ -1,148 +0,0 @@
-# OpenStack Infra , 2015. #zanata
-# Tom Cocozzello , 2015. #zanata
-# Andreas Jaeger , 2016. #zanata
-# Robert Simai , 2016.
#zanata -msgid "" -msgstr "" -"Project-Id-Version: aodh 4.0.1.dev87\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2017-07-13 18:01+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-10-07 06:30+0000\n" -"Last-Translator: Robert Simai \n" -"Language-Team: German\n" -"Language: de\n" -"X-Generator: Zanata 3.9.6\n" -"Plural-Forms: nplurals=2; plural=(n != 1)\n" - -#, python-format -msgid "%(name)s count exceeds maximum value %(maximum)d" -msgstr "%(name)s Anzahl überschreitet den Maximalwert %(maximum)d" - -#, python-format -msgid "%(rule)s must be set for %(type)s type alarm" -msgstr "%(rule)s muss für den Alarmtyp %(type)s gesetzt sein" - -#, python-format -msgid "%(rule1)s and %(rule2)s cannot be set at the same time" -msgstr "%(rule1)s und %(rule2)s können nicht gleichzeitig festgelegt werden" - -#, python-format -msgid "%s is not JSON serializable" -msgstr "%s ist nicht JSON-serialisierbar" - -#, python-format -msgid "Alarm %(alarm_id)s not found in project %(project)s" -msgstr "Alarm %(alarm_id)s nicht gefunden in Projekt %(project)s" - -#, python-format -msgid "Alarm %s not found" -msgstr "Alarm %s nicht gefunden" - -msgid "Alarm incorrect" -msgstr "Alaram inkorrekt" - -#, python-format -msgid "Alarm quota exceeded for user %(u)s on project %(p)s" -msgstr "Alarmquote überschritten für Benutzer %(u)s bei Projekt %(p)s" - -#, python-format -msgid "" -"Alarm when %(meter_name)s is %(comparison_operator)s a %(statistic)s of " -"%(threshold)s over %(period)s seconds" -msgstr "" -"Alarm, wenn %(meter_name)s %(comparison_operator)s ein %(statistic)s von " -"%(threshold)s über %(period)s Sekunden ist" - -#, python-format -msgid "Alarm when %s event occurred." -msgstr "Alarm wenn %s Ereignis auftritt." - -#, python-format -msgid "Failed to parse the timestamp value %s" -msgstr "Zeitmarkenwert %s konnte nicht analysiert werden" - -#, python-format -msgid "Filter expression not valid: %s" -msgstr "Filterausdruck nicht gültig: %s" - -msgid "Limit should be positive" -msgstr "Begrenzung muss positiv sein" - -#, python-format -msgid "Not Authorized to access %(aspect)s %(id)s" -msgstr "Nicht berechtigt für den Zugriff auf %(aspect)s %(id)s" - -#, python-format -msgid "" -"Notifying alarm %(alarm_name)s %(alarm_id)s of %(severity)s priority from " -"%(previous)s to %(current)s with action %(action)s because %(reason)s." -msgstr "" -"Benachrichtigung von Alarm %(alarm_name)s %(alarm_id)s mit Priorität " -"%(severity)s von %(previous)s in %(current)s mit Aktion %(action)s wegen " -"%(reason)s." - -#, python-format -msgid "Order-by expression not valid: %s" -msgstr "Ausdruck für 'Sortieren nach' nicht gültig: %s" - -#, python-format -msgid "" -"The data type %(type)s is not supported. The supported data type list is: " -"%(supported)s" -msgstr "" -"Der Datentyp %(type)s wird nicht unterstützt. Die Liste der unterstützten " -"Datentypen lautet: %(supported)s" - -msgid "Threshold rules should be combined with \"and\" or \"or\"" -msgstr "Schwellenregeln sollten mit \"und\" oder \"oder\" kombiniert werden." - -msgid "Time constraint names must be unique for a given alarm." -msgstr "Zeitvorgabennamen müssen für einen angegebenen Alarm eindeutig sein." - -#, python-format -msgid "Timezone %s is not valid" -msgstr "Zeitzone %s ist nicht gültig" - -#, python-format -msgid "" -"Unable to convert the value %(value)s to the expected data type %(type)s." 
-msgstr "" -"Wert %(value)s kann nicht in den erwarteten Datentyp %(type)s umgewandelt " -"werden." - -#, python-format -msgid "Unable to parse action %s" -msgstr "Aktion %s konnte nicht analysiert werden" - -#, python-format -msgid "" -"Unexpected exception converting %(value)s to the expected data type %(type)s." -msgstr "" -"Unerwartete Ausnahme beim Konvertieren von %(value)s in den erwarteten " -"Datentyp %(type)s." - -#, python-format -msgid "Unsupported action %s" -msgstr "Nicht unterstützte Aktion %s" - -#, python-format -msgid "You are not authorized to create action: %s" -msgstr "Sie sind nicht zur Erstellung der Aktion berechtigt: %s" - -msgid "alarm stats retrieval failed" -msgstr "Abrufen der Alarmstatistiken ist fehlgeschlagen" - -msgid "state invalid" -msgstr "Zustand ungültig" - -msgid "state_timestamp should be datetime object" -msgstr "state_timestamp sollte ein datetime-Objekt sein" - -msgid "timestamp should be datetime object" -msgstr "timestamp sollte ein datetime-Objekt sein" - -msgid "type must be set in every rule" -msgstr "Typ muss in jeder Regel gesetzt werden" diff --git a/aodh/locale/en_GB/LC_MESSAGES/aodh.po b/aodh/locale/en_GB/LC_MESSAGES/aodh.po deleted file mode 100644 index 92a7db54..00000000 --- a/aodh/locale/en_GB/LC_MESSAGES/aodh.po +++ /dev/null @@ -1,186 +0,0 @@ -# OpenStack Infra , 2015. #zanata -# Andi Chandler , 2016. #zanata -# Andreas Jaeger , 2016. #zanata -msgid "" -msgstr "" -"Project-Id-Version: aodh 4.0.1.dev87\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2017-07-13 18:01+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-05-20 10:23+0000\n" -"Last-Translator: Andi Chandler \n" -"Language-Team: English (United Kingdom)\n" -"Language: en-GB\n" -"X-Generator: Zanata 3.9.6\n" -"Plural-Forms: nplurals=2; plural=(n != 1)\n" - -#, python-format -msgid "%(name)s count exceeds maximum value %(maximum)d" -msgstr "%(name)s count exceeds maximum value %(maximum)d" - -#, python-format -msgid "%(rule)s must be set for %(type)s type alarm" -msgstr "%(rule)s must be set for %(type)s type alarm" - -#, python-format -msgid "%(rule1)s and %(rule2)s cannot be set at the same time" -msgstr "%(rule1)s and %(rule2)s cannot be set at the same time" - -#, python-format -msgid "%s is not JSON serializable" -msgstr "%s is not JSON serialisable" - -#, python-format -msgid "Alarm %(alarm_id)s not found in project %(project)s" -msgstr "Alarm %(alarm_id)s not found in project %(project)s" - -#, python-format -msgid "Alarm %s not found" -msgstr "Alarm %s not found" - -msgid "Alarm incorrect" -msgstr "Alarm incorrect" - -#, python-format -msgid "Alarm quota exceeded for user %(u)s on project %(p)s" -msgstr "Alarm quota exceeded for user %(u)s on project %(p)s" - -#, python-format -msgid "" -"Alarm when %(meter_name)s is %(comparison_operator)s a %(statistic)s of " -"%(threshold)s over %(period)s seconds" -msgstr "" -"Alarm when %(meter_name)s is %(comparison_operator)s a %(statistic)s of " -"%(threshold)s over %(period)s seconds" - -#, python-format -msgid "Alarm when %s event occurred." -msgstr "Alarm when %s event occurred." 
- -#, python-format -msgid "" -"Composite rule alarm with composition form: %(expression)s remaining as " -"%(state)s, due to rules: %(rules)s %(description)s" -msgstr "" -"Composite rule alarm with composition form: %(expression)s remaining as " -"%(state)s, due to rules: %(rules)s %(description)s" - -#, python-format -msgid "" -"Composite rule alarm with composition form: %(expression)s transition to " -"%(state)s, due to rules: %(rules)s %(description)s" -msgstr "" -"Composite rule alarm with composition form: %(expression)s transition to " -"%(state)s, due to rules: %(rules)s %(description)s" - -#, python-format -msgid "" -"Event hits the query ." -msgstr "" -"Event hits the query ." - -#, python-format -msgid "Failed to parse the timestamp value %s" -msgstr "Failed to parse the timestamp value %s" - -#, python-format -msgid "Filter expression not valid: %s" -msgstr "Filter expression not valid: %s" - -#, python-format -msgid "" -"Invalid input composite rule: %s, it should be a dict with an \"and\" or \"or" -"\" as key, and the value of dict should be a list of basic threshold rules " -"or sub composite rules, can be nested." -msgstr "" -"Invalid input composite rule: %s, it should be a dict with an \"and\" or \"or" -"\" as key, and the value of dict should be a list of basic threshold rules " -"or sub composite rules, can be nested." - -msgid "Limit should be positive" -msgstr "Limit should be positive" - -#, python-format -msgid "Not Authorized to access %(aspect)s %(id)s" -msgstr "Not Authorised to access %(aspect)s %(id)s" - -#, python-format -msgid "" -"Notifying alarm %(alarm_name)s %(alarm_id)s of %(severity)s priority from " -"%(previous)s to %(current)s with action %(action)s because %(reason)s." -msgstr "" -"Notifying alarm %(alarm_name)s %(alarm_id)s of %(severity)s priority from " -"%(previous)s to %(current)s with action %(action)s because %(reason)s." - -#, python-format -msgid "Order-by expression not valid: %s" -msgstr "Order-by expression not valid: %s" - -#, python-format -msgid "" -"The data type %(type)s is not supported. The supported data type list is: " -"%(supported)s" -msgstr "" -"The data type %(type)s is not supported. The supported data type list is: " -"%(supported)s" - -msgid "Threshold rules should be combined with \"and\" or \"or\"" -msgstr "Threshold rules should be combined with \"and\" or \"or\"" - -msgid "Time constraint names must be unique for a given alarm." -msgstr "Time constraint names must be unique for a given alarm." - -#, python-format -msgid "Timezone %s is not valid" -msgstr "Timezone %s is not valid" - -#, python-format -msgid "" -"Unable to convert the value %(value)s to the expected data type %(type)s." -msgstr "" -"Unable to convert the value %(value)s to the expected data type %(type)s." - -#, python-format -msgid "Unable to parse action %s" -msgstr "Unable to parse action %s" - -#, python-format -msgid "" -"Unexpected exception converting %(value)s to the expected data type %(type)s." -msgstr "" -"Unexpected exception converting %(value)s to the expected data type %(type)s." 
- -#, python-format -msgid "Unsupported action %s" -msgstr "Unsupported action %s" - -#, python-format -msgid "" -"Unsupported sub-rule type :%(rule)s in composite rule, should be one of: " -"%(plugins)s" -msgstr "" -"Unsupported sub-rule type :%(rule)s in composite rule, should be one of: " -"%(plugins)s" - -#, python-format -msgid "You are not authorized to create action: %s" -msgstr "You are not authorised to create action: %s" - -msgid "alarm stats retrieval failed" -msgstr "alarm stats retrieval failed" - -msgid "state invalid" -msgstr "state invalid" - -msgid "state_timestamp should be datetime object" -msgstr "state_timestamp should be datetime object" - -msgid "timestamp should be datetime object" -msgstr "timestamp should be datetime object" - -msgid "type must be set in every rule" -msgstr "type must be set in every rule" diff --git a/aodh/locale/es/LC_MESSAGES/aodh.po b/aodh/locale/es/LC_MESSAGES/aodh.po deleted file mode 100644 index 4b9ab648..00000000 --- a/aodh/locale/es/LC_MESSAGES/aodh.po +++ /dev/null @@ -1,128 +0,0 @@ -# Tom Cocozzello , 2015. #zanata -# Andreas Jaeger , 2016. #zanata -msgid "" -msgstr "" -"Project-Id-Version: aodh 4.0.1.dev87\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2017-07-13 18:01+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-04-12 04:26+0000\n" -"Last-Translator: Copied by Zanata \n" -"Language-Team: Spanish\n" -"Language: es\n" -"X-Generator: Zanata 3.9.6\n" -"Plural-Forms: nplurals=2; plural=(n != 1)\n" - -#, python-format -msgid "%(rule)s must be set for %(type)s type alarm" -msgstr "%(rule)s debe establecerse para la alarma de tipo %(type)s" - -#, python-format -msgid "%(rule1)s and %(rule2)s cannot be set at the same time" -msgstr "%(rule1)s y %(rule2)s no se pueden establecer al mismo tiempo" - -#, python-format -msgid "%s is not JSON serializable" -msgstr "%s no es serializable en JSON" - -#, python-format -msgid "Alarm %(alarm_id)s not found in project %(project)s" -msgstr "La alarma %(alarm_id)s no se ha encontrado en el proyecto %(project)s" - -#, python-format -msgid "Alarm %s not found" -msgstr "No se ha encontrado la alarma %s" - -msgid "Alarm incorrect" -msgstr "Alarma incorrecta" - -#, python-format -msgid "Alarm quota exceeded for user %(u)s on project %(p)s" -msgstr "" -"La cuota de alarma se ha excedido para el usuario %(u)s en el proyecto %(p)s" - -#, python-format -msgid "" -"Alarm when %(meter_name)s is %(comparison_operator)s a %(statistic)s of " -"%(threshold)s over %(period)s seconds" -msgstr "" -"Alarma cuando %(meter_name)s es %(comparison_operator)s un %(statistic)s de " -"%(threshold)s por encima de %(period)s segundos" - -#, python-format -msgid "Failed to parse the timestamp value %s" -msgstr "No se ha podido analizar el valor de indicación de fecha y hora %s" - -#, python-format -msgid "Filter expression not valid: %s" -msgstr "Expresión de filtro no válida: %s" - -#, python-format -msgid "Not Authorized to access %(aspect)s %(id)s" -msgstr "No está autorizado para acceder a %(aspect)s %(id)s" - -#, python-format -msgid "" -"Notifying alarm %(alarm_name)s %(alarm_id)s of %(severity)s priority from " -"%(previous)s to %(current)s with action %(action)s because %(reason)s." -msgstr "" -"Notificando la alarma %(alarm_name)s %(alarm_id)s de prioridad %(severity)s " -"de %(previous)s a %(current)s con la acción %(action)s debido a %(reason)s." 
- -#, python-format -msgid "Order-by expression not valid: %s" -msgstr "Expresión de ordenar por no válida: %s" - -#, python-format -msgid "" -"The data type %(type)s is not supported. The supported data type list is: " -"%(supported)s" -msgstr "" -"El tipo de datos %(type)s no es compatible. La lista de tipo de datos " -"admitido es: %(supported)s" - -msgid "Time constraint names must be unique for a given alarm." -msgstr "" -"Los nombres de restricción de tiempo deben ser exclusivos para una " -"determinada alarma." - -#, python-format -msgid "Timezone %s is not valid" -msgstr "El huso horario %s no es válido" - -#, python-format -msgid "" -"Unable to convert the value %(value)s to the expected data type %(type)s." -msgstr "" -"No se ha podido convertir el valor %(value)s al tipo de datos esperado " -"%(type)s." - -#, python-format -msgid "Unable to parse action %s" -msgstr "No se puede analizar la acción %s" - -#, python-format -msgid "" -"Unexpected exception converting %(value)s to the expected data type %(type)s." -msgstr "" -"Excepción inesperada al convertir %(value)s al tipo de dato esperado " -"%(type)s." - -#, python-format -msgid "Unsupported action %s" -msgstr "Acción %s no admitida" - -msgid "alarm stats retrieval failed" -msgstr "ha fallado la recuperación de estadísticas de la alarma" - -msgid "state invalid" -msgstr "estado no válido" - -msgid "state_timestamp should be datetime object" -msgstr "state_timestamp debe ser el objeto datetime" - -msgid "timestamp should be datetime object" -msgstr "" -"La indicación de fecha y hora debe ser el objeto datetime (fecha y hora)" diff --git a/aodh/locale/fr/LC_MESSAGES/aodh.po b/aodh/locale/fr/LC_MESSAGES/aodh.po deleted file mode 100644 index c3dca522..00000000 --- a/aodh/locale/fr/LC_MESSAGES/aodh.po +++ /dev/null @@ -1,127 +0,0 @@ -# OpenStack Infra , 2015. #zanata -# Tom Cocozzello , 2015. #zanata -# Andreas Jaeger , 2016. 
#zanata -msgid "" -msgstr "" -"Project-Id-Version: aodh 4.0.1.dev87\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2017-07-13 18:01+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-04-12 04:26+0000\n" -"Last-Translator: Copied by Zanata \n" -"Language-Team: French\n" -"Language: fr\n" -"X-Generator: Zanata 3.9.6\n" -"Plural-Forms: nplurals=2; plural=(n > 1)\n" - -#, python-format -msgid "%(rule)s must be set for %(type)s type alarm" -msgstr "%(rule)s doit être défini pour l'alarme de type %(type)s" - -#, python-format -msgid "%(rule1)s and %(rule2)s cannot be set at the same time" -msgstr "%(rule1)s et %(rule2)s ne peuvent pas être définis en même temps" - -#, python-format -msgid "%s is not JSON serializable" -msgstr "%s n'est pas sérialisable en JSON" - -#, python-format -msgid "Alarm %(alarm_id)s not found in project %(project)s" -msgstr "Alarme %(alarm_id)s introuvable dans le projet %(project)s" - -#, python-format -msgid "Alarm %s not found" -msgstr "Alarme: %s non trouvé" - -msgid "Alarm incorrect" -msgstr "Alarme incorrecte" - -#, python-format -msgid "Alarm quota exceeded for user %(u)s on project %(p)s" -msgstr "Quota d'alarme dépassé pour l'utilisateur %(u)s sur le projet %(p)s" - -#, python-format -msgid "" -"Alarm when %(meter_name)s is %(comparison_operator)s a %(statistic)s of " -"%(threshold)s over %(period)s seconds" -msgstr "" -"Alarme lorsque %(meter_name)s est %(comparison_operator)s à une " -"%(statistic)s de %(threshold)s de plus de %(period)s secondes" - -#, python-format -msgid "Failed to parse the timestamp value %s" -msgstr "Echec de l'analyse syntaxique de la valeur d'horodatage %s" - -#, python-format -msgid "Filter expression not valid: %s" -msgstr "Filtre de l'expression n'est pas valide: %s" - -#, python-format -msgid "Not Authorized to access %(aspect)s %(id)s" -msgstr "Non autorisé à accéder %(aspect)s %(id)s " - -#, python-format -msgid "" -"Notifying alarm %(alarm_name)s %(alarm_id)s of %(severity)s priority from " -"%(previous)s to %(current)s with action %(action)s because %(reason)s." -msgstr "" -"Notification de l'alarme %(alarm_name)s %(alarm_id)s de priorité " -"%(severity)s de %(previous)s à %(current)s avec l'action %(action)s. Cause : " -"%(reason)s." - -#, python-format -msgid "Order-by expression not valid: %s" -msgstr "L'expression de tri n'est pas valide : %s" - -#, python-format -msgid "" -"The data type %(type)s is not supported. The supported data type list is: " -"%(supported)s" -msgstr "" -"Le type de données %(type)s n'est pas supporté. Les types de données " -"supportés sont: %(supported)s" - -msgid "Time constraint names must be unique for a given alarm." -msgstr "" -"Les noms de contrainte de temps doivent être uniques pour une alarme donnée." - -#, python-format -msgid "Timezone %s is not valid" -msgstr "La timezone %s n'est pas valide" - -#, python-format -msgid "" -"Unable to convert the value %(value)s to the expected data type %(type)s." -msgstr "" -"Impossible de convertir la valeur %(value)s vers le type de données attendu " -"%(type)s." - -#, python-format -msgid "Unable to parse action %s" -msgstr "Impossible d'analyser l'action %s" - -#, python-format -msgid "" -"Unexpected exception converting %(value)s to the expected data type %(type)s." -msgstr "" -"Exception inattendue lors de la conversion de %(value)s dans le type de " -"donnée attendue %(type)s." 
- -#, python-format -msgid "Unsupported action %s" -msgstr "Action non supporté %s" - -msgid "alarm stats retrieval failed" -msgstr "Échec de la récupération de l'état de l'alerte" - -msgid "state invalid" -msgstr "Etat non valide" - -msgid "state_timestamp should be datetime object" -msgstr "state_timestamp doit correspondre à l'objet date-heure" - -msgid "timestamp should be datetime object" -msgstr "timestamp doit correspondre à l'objet date-heure" diff --git a/aodh/locale/it/LC_MESSAGES/aodh.po b/aodh/locale/it/LC_MESSAGES/aodh.po deleted file mode 100644 index caf862b4..00000000 --- a/aodh/locale/it/LC_MESSAGES/aodh.po +++ /dev/null @@ -1,126 +0,0 @@ -# Tom Cocozzello , 2015. #zanata -# Andreas Jaeger , 2016. #zanata -# KATO Tomoyuki , 2016. #zanata -msgid "" -msgstr "" -"Project-Id-Version: aodh 4.0.1.dev87\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2017-07-13 18:01+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-06-03 06:58+0000\n" -"Last-Translator: KATO Tomoyuki \n" -"Language-Team: Italian\n" -"Language: it\n" -"X-Generator: Zanata 3.9.6\n" -"Plural-Forms: nplurals=2; plural=(n != 1)\n" - -#, python-format -msgid "%(rule)s must be set for %(type)s type alarm" -msgstr "%(rule)s deve essere impostata per la segnalazione di tipo %(type)s" - -#, python-format -msgid "%(rule1)s and %(rule2)s cannot be set at the same time" -msgstr "%(rule1)s e %(rule2)s non possono essere impostate contemporaneamente" - -#, python-format -msgid "%s is not JSON serializable" -msgstr "%s non è serializzabile mediante JSON" - -#, python-format -msgid "Alarm %(alarm_id)s not found in project %(project)s" -msgstr "Segnalazione %(alarm_id)s non trovata nel progetto %(project)s" - -#, python-format -msgid "Alarm %s not found" -msgstr "Segnalazione %s non trovata" - -msgid "Alarm incorrect" -msgstr "Segnalazione non corretta" - -#, python-format -msgid "Alarm quota exceeded for user %(u)s on project %(p)s" -msgstr "Quota di segnalazione superata per l'utente %(u)s nel progetto %(p)s" - -#, python-format -msgid "" -"Alarm when %(meter_name)s is %(comparison_operator)s a %(statistic)s of " -"%(threshold)s over %(period)s seconds" -msgstr "" -"Segnalazione quando %(meter_name)s è %(comparison_operator)s un " -"%(statistic)s di %(threshold)s in %(period)s secondi" - -#, python-format -msgid "Failed to parse the timestamp value %s" -msgstr "Impossibile analizzare il valore data/ora %s" - -#, python-format -msgid "Filter expression not valid: %s" -msgstr "Espressione del filtro non valida: %s" - -#, python-format -msgid "Not Authorized to access %(aspect)s %(id)s" -msgstr "Non autorizzato ad accedere %(aspect)s %(id)s" - -#, python-format -msgid "" -"Notifying alarm %(alarm_name)s %(alarm_id)s of %(severity)s priority from " -"%(previous)s to %(current)s with action %(action)s because %(reason)s." -msgstr "" -"Notifica dell'allarme %(alarm_name)s %(alarm_id)s di priorità %(severity)s " -"da %(previous)s a %(current)s con azione %(action)s a causa di %(reason)s." - -#, python-format -msgid "Order-by expression not valid: %s" -msgstr "L'espressione ordina per non è valida: %s" - -#, python-format -msgid "" -"The data type %(type)s is not supported. The supported data type list is: " -"%(supported)s" -msgstr "" -"Il tipo di dati %(type)s non è supportato. L'elenco dei tipi di dati " -"supportati è: %(supported)s" - -msgid "Time constraint names must be unique for a given alarm." 
-msgstr "" -"I nomi dei limiti di tempo devono essere univoci per una data segnalazione." - -#, python-format -msgid "Timezone %s is not valid" -msgstr "Fuso orario %s non valido" - -#, python-format -msgid "" -"Unable to convert the value %(value)s to the expected data type %(type)s." -msgstr "" -"Impossibile convertire il valore %(value)s nel tipo di dati previsto " -"%(type)s." - -#, python-format -msgid "Unable to parse action %s" -msgstr "Impossibile analizzare l'azione %s" - -#, python-format -msgid "" -"Unexpected exception converting %(value)s to the expected data type %(type)s." -msgstr "" -"Eccezione non prevista durante la conversione di %(value)s per il tipo di " -"dati previsto %(type)s." - -#, python-format -msgid "Unsupported action %s" -msgstr "Azione non supportata %s" - -msgid "alarm stats retrieval failed" -msgstr "segnalazione richiamo statistiche non riuscito" - -msgid "state invalid" -msgstr "stato non valido" - -msgid "state_timestamp should be datetime object" -msgstr "state_timestamp deve essere un oggetto data/ora" - -msgid "timestamp should be datetime object" -msgstr "timestamp deve essere un oggetto data/ora" diff --git a/aodh/locale/ja/LC_MESSAGES/aodh.po b/aodh/locale/ja/LC_MESSAGES/aodh.po deleted file mode 100644 index 6cba5011..00000000 --- a/aodh/locale/ja/LC_MESSAGES/aodh.po +++ /dev/null @@ -1,135 +0,0 @@ -# Akihiro Motoki , 2015. #zanata -# KATO Tomoyuki , 2015. #zanata -# Tom Cocozzello , 2015. #zanata -# Andreas Jaeger , 2016. #zanata -# Shinichi Take , 2016. #zanata -# Yuta Hono , 2016. #zanata -msgid "" -msgstr "" -"Project-Id-Version: aodh 4.0.1.dev87\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2017-07-13 18:01+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-04-12 04:26+0000\n" -"Last-Translator: Copied by Zanata \n" -"Language-Team: Japanese\n" -"Language: ja\n" -"X-Generator: Zanata 3.9.6\n" -"Plural-Forms: nplurals=1; plural=0\n" - -#, python-format -msgid "%(name)s count exceeds maximum value %(maximum)d" -msgstr "%(name)s が最大値 %(maximum)d を超えています" - -#, python-format -msgid "%(rule)s must be set for %(type)s type alarm" -msgstr "%(type)s タイプのアラームに %(rule)s を設定する必要があります" - -#, python-format -msgid "%(rule1)s and %(rule2)s cannot be set at the same time" -msgstr "%(rule1)s と %(rule2)s を同時に設定することはできません" - -#, python-format -msgid "%s is not JSON serializable" -msgstr "%s が JSON シリアライズ可能ではありません" - -#, python-format -msgid "Alarm %(alarm_id)s not found in project %(project)s" -msgstr "アラーム %(alarm_id)s がプロジェクト %(project)s には見つかりません" - -#, python-format -msgid "Alarm %s not found" -msgstr "アラーム %s が見つかりません" - -msgid "Alarm incorrect" -msgstr "アラームが正しくありません" - -#, python-format -msgid "Alarm quota exceeded for user %(u)s on project %(p)s" -msgstr "プロジェクト %(p)s のユーザー %(u)s のアラームクォータを超過しました" - -#, python-format -msgid "" -"Alarm when %(meter_name)s is %(comparison_operator)s a %(statistic)s of " -"%(threshold)s over %(period)s seconds" -msgstr "" -"%(period)s 秒にわたる %(meter_name)s と %(threshold)s の %(statistic)s の比較" -"が %(comparison_operator)s である場合のアラーム" - -#, python-format -msgid "Failed to parse the timestamp value %s" -msgstr "タイムスタンプ値 %s を解析できませんでした" - -#, python-format -msgid "Filter expression not valid: %s" -msgstr "フィルター式が無効です: %s" - -#, python-format -msgid "Not Authorized to access %(aspect)s %(id)s" -msgstr "%(aspect)s %(id)s にアクセスする権限がありません" - -#, python-format -msgid "" -"Notifying alarm 
%(alarm_name)s %(alarm_id)s of %(severity)s priority from " -"%(previous)s to %(current)s with action %(action)s because %(reason)s." -msgstr "" -"優先順位 %(severity)s のアラーム %(alarm_name)s %(alarm_id)s をアクション " -"%(action)s によって %(previous)s から %(current)s へ通知中。理由: " -"%(reason)s。" - -#, python-format -msgid "Order-by expression not valid: %s" -msgstr "order-by 式が無効です: %s" - -#, python-format -msgid "" -"The data type %(type)s is not supported. The supported data type list is: " -"%(supported)s" -msgstr "" -"データ型 %(type)s はサポートされていません。サポートされているデータ型のリス" -"ト: %(supported)s" - -msgid "Time constraint names must be unique for a given alarm." -msgstr "時間制約の名前は、指定されたアラームで一意でなければなりません。" - -#, python-format -msgid "Timezone %s is not valid" -msgstr "タイムゾーン %s が無効です" - -#, python-format -msgid "" -"Unable to convert the value %(value)s to the expected data type %(type)s." -msgstr "値 %(value)s を、想定されるデータ型 %(type)s に変換できません。" - -#, python-format -msgid "Unable to parse action %s" -msgstr "アクション %s を解析できません" - -#, python-format -msgid "" -"Unexpected exception converting %(value)s to the expected data type %(type)s." -msgstr "" -"%(value)s を想定されるデータ型 %(type)s に変換する際に、想定しない例外が発生" -"しました。" - -#, python-format -msgid "Unsupported action %s" -msgstr "サポートされないアクション %s" - -#, python-format -msgid "You are not authorized to create action: %s" -msgstr "アクションの作成を許可されていません: %s" - -msgid "alarm stats retrieval failed" -msgstr "アラーム統計の取得に失敗しました" - -msgid "state invalid" -msgstr "状態が無効です" - -msgid "state_timestamp should be datetime object" -msgstr "state_timestamp は datetime オブジェクトでなければなりません" - -msgid "timestamp should be datetime object" -msgstr "タイムスタンプは datetime オブジェクトでなければなりません" diff --git a/aodh/locale/ko_KR/LC_MESSAGES/aodh.po b/aodh/locale/ko_KR/LC_MESSAGES/aodh.po deleted file mode 100644 index 8f682395..00000000 --- a/aodh/locale/ko_KR/LC_MESSAGES/aodh.po +++ /dev/null @@ -1,128 +0,0 @@ -# Lucas Palm , 2015. #zanata -# OpenStack Infra , 2015. #zanata -# Andreas Jaeger , 2016. #zanata -# SEOKJAE BARK , 2017. 
#zanata -msgid "" -msgstr "" -"Project-Id-Version: aodh 4.0.1.dev87\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2017-07-13 18:01+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2017-07-06 01:10+0000\n" -"Last-Translator: SEOKJAE BARK \n" -"Language-Team: Korean (South Korea)\n" -"Language: ko-KR\n" -"X-Generator: Zanata 3.9.6\n" -"Plural-Forms: nplurals=1; plural=0\n" - -#, python-format -msgid "%(rule)s must be set for %(type)s type alarm" -msgstr "%(type)s 유형 알람에 %(rule)s을(를) 설정해야 함" - -#, python-format -msgid "%(rule1)s and %(rule2)s cannot be set at the same time" -msgstr "%(rule1)s 및 %(rule2)s을(를) 동시에 설정할 수 없음" - -#, python-format -msgid "%s is not JSON serializable" -msgstr "%s은(는) JSON 직렬화 할 수 없음" - -#, python-format -msgid "Alarm %(alarm_id)s not found in project %(project)s" -msgstr "%(alarm_id)s 알람이 %(project)s 프로젝트에 없음" - -#, python-format -msgid "Alarm %s not found" -msgstr "%s 알람을 찾을 수 없음" - -msgid "Alarm incorrect" -msgstr "알림이 올바르지 않습니다" - -#, python-format -msgid "Alarm quota exceeded for user %(u)s on project %(p)s" -msgstr "%(p)s 프로젝트의 %(u)s 사용자에 대한 알람 할당량 초과" - -#, python-format -msgid "" -"Alarm when %(meter_name)s is %(comparison_operator)s a %(statistic)s of " -"%(threshold)s over %(period)s seconds" -msgstr "" -"%(meter_name)s이(가) %(comparison_operator)s %(statistic)s %(threshold)s인 경" -"우 알람(%(period)s초 동안)" - -#, python-format -msgid "Alarm when %s event occurred." -msgstr "%s의 event가 발생했을 때 알람을 발생" - -#, python-format -msgid "Failed to parse the timestamp value %s" -msgstr "시간소인 값 %s 구문 분석 실패" - -#, python-format -msgid "Filter expression not valid: %s" -msgstr "필터 표현식이 올바르지 않음: %s" - -#, python-format -msgid "Not Authorized to access %(aspect)s %(id)s" -msgstr "%(aspect)s %(id)s에 대한 액세스 권한이 부여되지 않음" - -#, python-format -msgid "" -"Notifying alarm %(alarm_name)s %(alarm_id)s of %(severity)s priority from " -"%(previous)s to %(current)s with action %(action)s because %(reason)s." -msgstr "" -"%(severity)s 우선순위에 대한 알람 %(alarm_name)s %(alarm_id)s 알림, " -"%(previous)s부터 %(current)s까지, 조치 %(action)s 사용. 이유: %(reason)s." - -#, python-format -msgid "Order-by expression not valid: %s" -msgstr "Order-by 표현식이 올바르지 않음: %s" - -#, python-format -msgid "" -"The data type %(type)s is not supported. The supported data type list is: " -"%(supported)s" -msgstr "" -"데이터 유형 %(type)s이(가) 지원되지 않습니다. 지원되는 데이터 유형 목록은 " -"%(supported)s입니다." - -msgid "Time constraint names must be unique for a given alarm." -msgstr "시간 제한조건 이름은 지정된 알람에 고유해야 합니다." - -#, python-format -msgid "Timezone %s is not valid" -msgstr "시간대 %s이(가) 올바르지 않음" - -#, python-format -msgid "" -"Unable to convert the value %(value)s to the expected data type %(type)s." -msgstr "%(value)s 값을 예상 데이터 유형 %(type)s(으)로 변환할 수 없습니다." - -#, python-format -msgid "Unable to parse action %s" -msgstr "%s 조치를 구문 분석할 수 없음" - -#, python-format -msgid "" -"Unexpected exception converting %(value)s to the expected data type %(type)s." -msgstr "" -"%(value)s을(를) 예상된 데이터 유형으로 변환하는 중에 예상치 않은 예외 발생 " -"%(type)s." - -#, python-format -msgid "Unsupported action %s" -msgstr "지원되지 않는 조치 %s" - -msgid "alarm stats retrieval failed" -msgstr "알람 통계 검색에 실패했습니다. 
" - -msgid "state invalid" -msgstr "상태가 잘못되었습니다" - -msgid "state_timestamp should be datetime object" -msgstr "state_timestamp는 Datetime 오브젝트여야 함" - -msgid "timestamp should be datetime object" -msgstr "시간소인은 Datetime 오브젝트여야 함" diff --git a/aodh/locale/pt/LC_MESSAGES/aodh.po b/aodh/locale/pt/LC_MESSAGES/aodh.po deleted file mode 100644 index bfd5cea8..00000000 --- a/aodh/locale/pt/LC_MESSAGES/aodh.po +++ /dev/null @@ -1,142 +0,0 @@ -# Translations template for aodh. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the aodh project. -# -# Translators: -# AnaFonseca , 2015 -# Andreas Jaeger , 2016. #zanata -msgid "" -msgstr "" -"Project-Id-Version: aodh 4.0.1.dev87\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2017-07-13 18:01+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-04-12 03:59+0000\n" -"Last-Translator: Copied by Zanata \n" -"Language: pt\n" -"Plural-Forms: nplurals=2; plural=(n != 1);\n" -"Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.9.6\n" -"Language-Team: Portuguese\n" - -#, python-format -msgid "%(name)s count exceeds maximum value %(maximum)d" -msgstr "a contagem %(name)s excede o valor máximo %(maximum)d" - -#, python-format -msgid "%(rule)s must be set for %(type)s type alarm" -msgstr "%(rule)s devem ser definidas para o tipo de aviso %(type)s" - -#, python-format -msgid "%(rule1)s and %(rule2)s cannot be set at the same time" -msgstr "%(rule1)s e %(rule2)s não podem ser programadas ao mesmo tempo" - -#, python-format -msgid "Alarm %(alarm_id)s not found in project %(project)s" -msgstr "Alarme %(alarm_id)s não encontrado no projeto %(project)s" - -#, python-format -msgid "Alarm %s not found" -msgstr "Alarme %s não encontrado" - -msgid "Alarm incorrect" -msgstr "Alarme incorreto" - -#, python-format -msgid "Alarm quota exceeded for user %(u)s on project %(p)s" -msgstr "Aviso de quota excedida para o utilizador %(u)s no projeto %(p)s" - -#, python-format -msgid "" -"Alarm when %(meter_name)s is %(comparison_operator)s a %(statistic)s of " -"%(threshold)s over %(period)s seconds" -msgstr "" -"Alarme quando %(meter_name)s é %(comparison_operator)s uma %(statistic)s de " -"%(threshold)s em %(period)s segundos" - -#, python-format -msgid "Alarm when %s event occurred." -msgstr "Alarme quando evento %s ocorreu." - -#, python-format -msgid "Failed to parse the timestamp value %s" -msgstr "Erro ao analisar o valor data/hora %s" - -#, python-format -msgid "Filter expression not valid: %s" -msgstr "Expressão filtro inválida: %s" - -msgid "Limit should be positive" -msgstr "O limite deve ser positivo" - -#, python-format -msgid "Not Authorized to access %(aspect)s %(id)s" -msgstr "Não Autorizado o acesso a %(aspect)s %(id)s" - -#, python-format -msgid "" -"Notifying alarm %(alarm_name)s %(alarm_id)s of %(severity)s priority from " -"%(previous)s to %(current)s with action %(action)s because %(reason)s." -msgstr "" -"Notificar alarme %(alarm_name)s %(alarm_id)s de %(severity)s prioridade de " -"%(previous)s a %(current)s com a ação %(action)s devido a %(reason)s." - -#, python-format -msgid "Order-by expression not valid: %s" -msgstr "Expressão ordenar por inválida: %s" - -#, python-format -msgid "" -"The data type %(type)s is not supported. The supported data type list is: " -"%(supported)s" -msgstr "" -"O tipo de dados %(type)s não é suportado. 
A lista do tipo de dados " -"suportados é: %(supported)s" - -msgid "Time constraint names must be unique for a given alarm." -msgstr "" -"Os nomes das restrições de tempo deve ser únicos para um determinado aviso." - -#, python-format -msgid "Timezone %s is not valid" -msgstr "Fuso horário %s inválido" - -#, python-format -msgid "" -"Unable to convert the value %(value)s to the expected data type %(type)s." -msgstr "" -"Incapaz de converter o valor %(value)s para o tipo de dados esperados " -"%(type)s." - -#, python-format -msgid "Unable to parse action %s" -msgstr "Incapaz de analisar a ação %s" - -#, python-format -msgid "" -"Unexpected exception converting %(value)s to the expected data type %(type)s." -msgstr "" -"Exceção inesperada ao converter %(value)s para o tipo de dados esperado " -"%(type)s." - -#, python-format -msgid "Unsupported action %s" -msgstr "Ação não suportada %s" - -#, python-format -msgid "You are not authorized to create action: %s" -msgstr "Não tem permissão para criar a ação: %s" - -msgid "alarm stats retrieval failed" -msgstr "a extração da estatística do alarme falhou" - -msgid "state invalid" -msgstr "estato inválido" - -msgid "state_timestamp should be datetime object" -msgstr "state_timestamp deve ser um objeto data/hora" - -msgid "timestamp should be datetime object" -msgstr "o timestamp deve ser um objeto data/hora" diff --git a/aodh/locale/pt_BR/LC_MESSAGES/aodh.po b/aodh/locale/pt_BR/LC_MESSAGES/aodh.po deleted file mode 100644 index 1e59c149..00000000 --- a/aodh/locale/pt_BR/LC_MESSAGES/aodh.po +++ /dev/null @@ -1,127 +0,0 @@ -# Lucas Palm , 2015. #zanata -# OpenStack Infra , 2015. #zanata -# Andreas Jaeger , 2016. #zanata -# KATO Tomoyuki , 2016. #zanata -msgid "" -msgstr "" -"Project-Id-Version: aodh 4.0.1.dev87\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2017-07-13 18:01+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-06-03 06:59+0000\n" -"Last-Translator: KATO Tomoyuki \n" -"Language-Team: Portuguese (Brazil)\n" -"Language: pt-BR\n" -"X-Generator: Zanata 3.9.6\n" -"Plural-Forms: nplurals=2; plural=(n != 1)\n" - -#, python-format -msgid "%(rule)s must be set for %(type)s type alarm" -msgstr "%(rule)s deve ser definido para alarme de tipo %(type)s" - -#, python-format -msgid "%(rule1)s and %(rule2)s cannot be set at the same time" -msgstr "%(rule1)s e %(rule2)s não podem ser configurados ao mesmo tempo" - -#, python-format -msgid "%s is not JSON serializable" -msgstr "%s não é JSON serializável" - -#, python-format -msgid "Alarm %(alarm_id)s not found in project %(project)s" -msgstr "Alarme%(alarm_id)s não localizado no projeto%(project)s" - -#, python-format -msgid "Alarm %s not found" -msgstr "Alarme %s não localizado" - -msgid "Alarm incorrect" -msgstr "Alarme incorreto" - -#, python-format -msgid "Alarm quota exceeded for user %(u)s on project %(p)s" -msgstr "Cota de alarme excedida para usuário %(u)s no projeto %(p)s" - -#, python-format -msgid "" -"Alarm when %(meter_name)s is %(comparison_operator)s a %(statistic)s of " -"%(threshold)s over %(period)s seconds" -msgstr "" -"Alarma quando %(meter_name)s é %(comparison_operator)s que %(statistic)s de " -"%(threshold)s durante %(period)s segundos" - -#, python-format -msgid "Failed to parse the timestamp value %s" -msgstr "Falha ao analisar o valor do registro de data e hora %s" - -#, python-format -msgid "Filter expression not valid: %s" -msgstr "Expressão de 
filtro inválida: %s" - -#, python-format -msgid "Not Authorized to access %(aspect)s %(id)s" -msgstr "Não Autorizado a acessar %(aspect)s %(id)s" - -#, python-format -msgid "" -"Notifying alarm %(alarm_name)s %(alarm_id)s of %(severity)s priority from " -"%(previous)s to %(current)s with action %(action)s because %(reason)s." -msgstr "" -"Notificando alarme %(alarm_name)s %(alarm_id)s da prioridade %(severity)s do " -"%(previous)s para %(current)s com ação %(action)s porque %(reason)s." - -#, python-format -msgid "Order-by expression not valid: %s" -msgstr "Expressão solicitada inválida: %s" - -#, python-format -msgid "" -"The data type %(type)s is not supported. The supported data type list is: " -"%(supported)s" -msgstr "" -"O tipo de dados %(type)s não é suportado. A lista de tipos de dados " -"suportados é: %(supported)s" - -msgid "Time constraint names must be unique for a given alarm." -msgstr "" -"Nomes de restrição de tempo devem ser exclusivos para um determinado alarme." - -#, python-format -msgid "Timezone %s is not valid" -msgstr "Fuso horário %s não é válido" - -#, python-format -msgid "" -"Unable to convert the value %(value)s to the expected data type %(type)s." -msgstr "" -"Não é possível converter o valor %(value)s para o tipo de dados esperado " -"%(type)s." - -#, python-format -msgid "Unable to parse action %s" -msgstr "Não é possível analisar ação %s" - -#, python-format -msgid "" -"Unexpected exception converting %(value)s to the expected data type %(type)s." -msgstr "" -"Exceção inesperada convertendo %(value)s para o tipo de dado esperado " -"%(type)s." - -#, python-format -msgid "Unsupported action %s" -msgstr "Ação não suportada %s" - -msgid "alarm stats retrieval failed" -msgstr "recuperação das estatísticas de alarme falhou" - -msgid "state invalid" -msgstr "estado inválido" - -msgid "state_timestamp should be datetime object" -msgstr "state_timestamp precisa ser objeto de data/hora" - -msgid "timestamp should be datetime object" -msgstr "registro de data e hora precisa ser objeto de data/hora" diff --git a/aodh/locale/ru/LC_MESSAGES/aodh.po b/aodh/locale/ru/LC_MESSAGES/aodh.po deleted file mode 100644 index 69ba38dd..00000000 --- a/aodh/locale/ru/LC_MESSAGES/aodh.po +++ /dev/null @@ -1,146 +0,0 @@ -# Translations template for aodh. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the aodh project. -# -# Translators: -# Altinbek , 2015 -# Lucas Palm , 2015. #zanata -# Andreas Jaeger , 2016. #zanata -msgid "" -msgstr "" -"Project-Id-Version: aodh 4.0.1.dev87\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2017-07-13 18:01+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-04-12 03:59+0000\n" -"Last-Translator: Copied by Zanata \n" -"Language: ru\n" -"Plural-Forms: nplurals=4; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n" -"%10<=4 && (n%100<12 || n%100>14) ? 1 : n%10==0 || (n%10>=5 && n%10<=9) || (n" -"%100>=11 && n%100<=14)? 
2 : 3);\n" -"Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.9.6\n" -"Language-Team: Russian\n" - -#, python-format -msgid "%(name)s count exceeds maximum value %(maximum)d" -msgstr "контент %(name)s превышает количество символов в %(maximum)d" - -#, python-format -msgid "%(rule)s must be set for %(type)s type alarm" -msgstr "%(rule)s должны быть установлены для %(type)s сигналов тревоги" - -#, python-format -msgid "%(rule1)s and %(rule2)s cannot be set at the same time" -msgstr "%(rule1)s и %(rule2)s не могут работать одновременно" - -#, python-format -msgid "%s is not JSON serializable" -msgstr "%s не является сериализуемым с помощью JSON" - -#, python-format -msgid "Alarm %(alarm_id)s not found in project %(project)s" -msgstr "Сигнал %(alarm_id)s не найдены в проекте %(project)s" - -#, python-format -msgid "Alarm %s not found" -msgstr "Сигнал %s не найден" - -msgid "Alarm incorrect" -msgstr "Сигнализация неисправна" - -#, python-format -msgid "Alarm quota exceeded for user %(u)s on project %(p)s" -msgstr "количество ошибок пользователем %(u)s превысила норму %(p)s" - -#, python-format -msgid "" -"Alarm when %(meter_name)s is %(comparison_operator)s a %(statistic)s of " -"%(threshold)s over %(period)s seconds" -msgstr "" -"При срабатываемости сигналатревоги %(meter_name)s как " -"%(comparison_operator)s a %(statistic)s в %(threshold)s срабатывает за " -"%(period)s секунду" - -#, python-format -msgid "Failed to parse the timestamp value %s" -msgstr "Не удалось разобрать значение временной метки %s" - -#, python-format -msgid "Filter expression not valid: %s" -msgstr "Фильтр ввода не действует: %s" - -msgid "Limit should be positive" -msgstr "Лимит должен быть точным" - -#, python-format -msgid "Not Authorized to access %(aspect)s %(id)s" -msgstr "Нет доступа к %(aspect)s %(id)s" - -#, python-format -msgid "" -"Notifying alarm %(alarm_name)s %(alarm_id)s of %(severity)s priority from " -"%(previous)s to %(current)s with action %(action)s because %(reason)s." -msgstr "" -"Сигнал тревоги %(alarm_name)s %(alarm_id)s не работает потому что " -"%(reason)s в %(severity)s приоритетом на %(previous)s %(current)s " -"влияние на действие %(action)s" - -#, python-format -msgid "Order-by expression not valid: %s" -msgstr "вызов значения не активна: %s" - -#, python-format -msgid "" -"The data type %(type)s is not supported. The supported data type list is: " -"%(supported)s" -msgstr "" -"Тип данных %(type)s не поддерживается. Список поддерживаемых типов данных: " -"%(supported)s" - -msgid "Time constraint names must be unique for a given alarm." -msgstr "Название временного контента должна отличаться для сигнала превоги" - -#, python-format -msgid "Timezone %s is not valid" -msgstr "таймер %s не актевирован" - -#, python-format -msgid "" -"Unable to convert the value %(value)s to the expected data type %(type)s." -msgstr "" -"Невозможно преобразовать значение %(value)s с ожидаемым типом данных " -"%(type)s." - -#, python-format -msgid "Unable to parse action %s" -msgstr "Невозможно разобрать действий %s" - -#, python-format -msgid "" -"Unexpected exception converting %(value)s to the expected data type %(type)s." -msgstr "" -"мгновенное преобразования значения %(value)s с ожидаемым типом данных " -"%(type)s." 
- -#, python-format -msgid "Unsupported action %s" -msgstr "не поддерживается действие %s" - -#, python-format -msgid "You are not authorized to create action: %s" -msgstr "Вы не авторизованы, чтобы деиствовать: %s" - -msgid "alarm stats retrieval failed" -msgstr "Статистика сигнал оповещения не получен" - -msgid "state invalid" -msgstr "Неправильное состояние" - -msgid "state_timestamp should be datetime object" -msgstr "В state_timestamp должен быть указан дата объекта" - -msgid "timestamp should be datetime object" -msgstr "должна быть указана дата вывода объекта" diff --git a/aodh/locale/zh_CN/LC_MESSAGES/aodh.po b/aodh/locale/zh_CN/LC_MESSAGES/aodh.po deleted file mode 100644 index 03ce1ee0..00000000 --- a/aodh/locale/zh_CN/LC_MESSAGES/aodh.po +++ /dev/null @@ -1,128 +0,0 @@ -# Lucas Palm , 2015. #zanata -# OpenStack Infra , 2015. #zanata -# Andreas Jaeger , 2016. #zanata -# KATO Tomoyuki , 2016. #zanata -msgid "" -msgstr "" -"Project-Id-Version: aodh 4.0.1.dev87\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2017-07-13 18:01+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-06-17 02:24+0000\n" -"Last-Translator: KATO Tomoyuki \n" -"Language-Team: Chinese (China)\n" -"Language: zh-CN\n" -"X-Generator: Zanata 3.9.6\n" -"Plural-Forms: nplurals=1; plural=0\n" - -#, python-format -msgid "%(name)s count exceeds maximum value %(maximum)d" -msgstr "%(name)s数量超过最大值%(maximum)d" - -#, python-format -msgid "%(rule)s must be set for %(type)s type alarm" -msgstr "类型为%(type)s的告警必须设置%(rule)s" - -#, python-format -msgid "%(rule1)s and %(rule2)s cannot be set at the same time" -msgstr "%(rule1)s和%(rule2)s无法同时被设置" - -#, python-format -msgid "%s is not JSON serializable" -msgstr "%s 不是可序列化 JSON" - -#, python-format -msgid "Alarm %(alarm_id)s not found in project %(project)s" -msgstr "告警%(alarm_id)s在项目%(project)s中未找到" - -#, python-format -msgid "Alarm %s not found" -msgstr "告警%s没有找到" - -msgid "Alarm incorrect" -msgstr "警报不正确" - -#, python-format -msgid "Alarm quota exceeded for user %(u)s on project %(p)s" -msgstr "用户%(u)s在项目%(p)s中的告警配额已溢出" - -#, python-format -msgid "" -"Alarm when %(meter_name)s is %(comparison_operator)s a %(statistic)s of " -"%(threshold)s over %(period)s seconds" -msgstr "" -"请在 %(meter_name)s 是 %(comparison_operator)s(%(threshold)s 的 " -"%(statistic)s)的时间超过 %(period)s 秒时发出警报" - -#, python-format -msgid "Failed to parse the timestamp value %s" -msgstr "解析时间戳%s失败" - -#, python-format -msgid "Filter expression not valid: %s" -msgstr "过滤表达式不合法:%s" - -#, python-format -msgid "Not Authorized to access %(aspect)s %(id)s" -msgstr "权限不足以访问%(aspect)s %(id)s" - -#, python-format -msgid "" -"Notifying alarm %(alarm_name)s %(alarm_id)s of %(severity)s priority from " -"%(previous)s to %(current)s with action %(action)s because %(reason)s." -msgstr "" -"正在通知警报%(alarm_name)s %(alarm_id)s,警报级别%(severity)s,状态" -"从%(previous)s变为%(current)s,动作为%(action)s,原因是%(reason)s。" - -#, python-format -msgid "Order-by expression not valid: %s" -msgstr "orderby表达式不合法:%s" - -#, python-format -msgid "" -"The data type %(type)s is not supported. The supported data type list is: " -"%(supported)s" -msgstr "数据类型%(type)s不被支持。支持的数据类型列表:%(supported)s" - -msgid "Time constraint names must be unique for a given alarm." 
-msgstr "一个指定的告警的时间约束名称必须唯一" - -#, python-format -msgid "Timezone %s is not valid" -msgstr "时区%s不合法" - -#, python-format -msgid "" -"Unable to convert the value %(value)s to the expected data type %(type)s." -msgstr "无法转换%(value)s到预期的数据类型%(type)s。" - -#, python-format -msgid "Unable to parse action %s" -msgstr "无法解析动作%s" - -#, python-format -msgid "" -"Unexpected exception converting %(value)s to the expected data type %(type)s." -msgstr "在转换%(value)s到预期的数据类型%(type)s时发生了未预料的异常。" - -#, python-format -msgid "Unsupported action %s" -msgstr "动作%s不支持" - -#, python-format -msgid "You are not authorized to create action: %s" -msgstr "你没有权限创建动作:%s" - -msgid "alarm stats retrieval failed" -msgstr "警报统计信息获取失败" - -msgid "state invalid" -msgstr "状态无效" - -msgid "state_timestamp should be datetime object" -msgstr "state_timestamp必须是datetime对象" - -msgid "timestamp should be datetime object" -msgstr "timestamp必须是datatime对象" diff --git a/aodh/locale/zh_TW/LC_MESSAGES/aodh.po b/aodh/locale/zh_TW/LC_MESSAGES/aodh.po deleted file mode 100644 index b671b61d..00000000 --- a/aodh/locale/zh_TW/LC_MESSAGES/aodh.po +++ /dev/null @@ -1,119 +0,0 @@ -# Lucas Palm , 2015. #zanata -# Jennifer , 2016. #zanata -# KATO Tomoyuki , 2016. #zanata -msgid "" -msgstr "" -"Project-Id-Version: aodh 4.0.1.dev87\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2017-07-13 18:01+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-06-03 07:04+0000\n" -"Last-Translator: KATO Tomoyuki \n" -"Language-Team: Chinese (Taiwan)\n" -"Language: zh-TW\n" -"X-Generator: Zanata 3.9.6\n" -"Plural-Forms: nplurals=1; plural=0\n" - -#, python-format -msgid "%(rule)s must be set for %(type)s type alarm" -msgstr "必須為 %(type)s 類型警示設定 %(rule)s" - -#, python-format -msgid "%(rule1)s and %(rule2)s cannot be set at the same time" -msgstr "無法同時設定 %(rule1)s 和 %(rule2)s" - -#, python-format -msgid "%s is not JSON serializable" -msgstr "%s 不可進行 JSON 序列化" - -#, python-format -msgid "Alarm %(alarm_id)s not found in project %(project)s" -msgstr "在專案 %(project)s 中找不到警示 %(alarm_id)s" - -#, python-format -msgid "Alarm %s not found" -msgstr "找不到警示 %s" - -msgid "Alarm incorrect" -msgstr "警示不正確" - -#, python-format -msgid "Alarm quota exceeded for user %(u)s on project %(p)s" -msgstr "在專案 %(p)s 上,針對使用者 %(u)s 已超出的警示配額" - -#, python-format -msgid "" -"Alarm when %(meter_name)s is %(comparison_operator)s a %(statistic)s of " -"%(threshold)s over %(period)s seconds" -msgstr "" -"如果 %(meter_name)s 在 %(period)s 秒內 %(comparison_operator)s %(threshold)s " -"的%(statistic)s,則會出現警示" - -#, python-format -msgid "Failed to parse the timestamp value %s" -msgstr "無法剖析時間戳記值 %s" - -#, python-format -msgid "Filter expression not valid: %s" -msgstr "過濾表示式無效:%s" - -#, python-format -msgid "Not Authorized to access %(aspect)s %(id)s" -msgstr "未獲授權來存取 %(aspect)s %(id)s" - -#, python-format -msgid "" -"Notifying alarm %(alarm_name)s %(alarm_id)s of %(severity)s priority from " -"%(previous)s to %(current)s with action %(action)s because %(reason)s." -msgstr "" -"正在以動作 %(action)s 通知優先順序為 %(severity)s 的警示 %(alarm_name)s " -"%(alarm_id)s(從 %(previous)s 至 %(current)s),因為 %(reason)s。" - -#, python-format -msgid "Order-by expression not valid: %s" -msgstr "排序方式表示式無效:%s" - -#, python-format -msgid "" -"The data type %(type)s is not supported. 
The supported data type list is: "
-"%(supported)s"
-msgstr "不支援資料類型 %(type)s。支援的資料類型清單為:%(supported)s"
-
-msgid "Time constraint names must be unique for a given alarm."
-msgstr "針對給定的警示,時間限制名稱必須是唯一的。"
-
-#, python-format
-msgid "Timezone %s is not valid"
-msgstr "時區 %s 無效"
-
-#, python-format
-msgid ""
-"Unable to convert the value %(value)s to the expected data type %(type)s."
-msgstr "無法將值 %(value)s 轉換成預期的資料類型 %(type)s。"
-
-#, python-format
-msgid "Unable to parse action %s"
-msgstr "無法剖析動作 %s"
-
-#, python-format
-msgid ""
-"Unexpected exception converting %(value)s to the expected data type %(type)s."
-msgstr "將 %(value)s 轉換為預期的資料類型%(type)s 時發生非預期的異常狀況。"
-
-#, python-format
-msgid "Unsupported action %s"
-msgstr "不受支援的動作 %s"
-
-msgid "alarm stats retrieval failed"
-msgstr "警示統計資料擷取失敗"
-
-msgid "state invalid"
-msgstr "狀態無效"
-
-msgid "state_timestamp should be datetime object"
-msgstr "state_timestamp 應該為日期時間物件"
-
-msgid "timestamp should be datetime object"
-msgstr "時間戳記應該為日期時間物件"
diff --git a/aodh/messaging.py b/aodh/messaging.py
deleted file mode 100644
index 958e1de2..00000000
--- a/aodh/messaging.py
+++ /dev/null
@@ -1,62 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2013-2015 eNovance
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import oslo_messaging
-from oslo_messaging import serializer as oslo_serializer
-
-DEFAULT_URL = "__default__"
-TRANSPORTS = {}
-_SERIALIZER = oslo_serializer.JsonPayloadSerializer()
-
-
-def setup():
-    oslo_messaging.set_transport_defaults('aodh')
-
-
-def get_transport(conf, url=None, optional=False, cache=True):
-    """Initialise the oslo_messaging layer."""
-    global TRANSPORTS, DEFAULT_URL
-    cache_key = url or DEFAULT_URL
-    transport = TRANSPORTS.get(cache_key)
-    if not transport or not cache:
-        try:
-            transport = oslo_messaging.get_notification_transport(conf, url)
-        except (oslo_messaging.InvalidTransportURL,
-                oslo_messaging.DriverLoadFailure):
-            if not optional or url:
-                # NOTE(sileht): oslo_messaging is configured but unloadable
-                # so reraise the exception
-                raise
-            return None
-        else:
-            if cache:
-                TRANSPORTS[cache_key] = transport
-    return transport
-
-
-def get_batch_notification_listener(transport, targets, endpoints,
-                                    allow_requeue=False,
-                                    batch_size=1, batch_timeout=None):
-    """Return a configured oslo_messaging notification listener."""
-    return oslo_messaging.get_batch_notification_listener(
-        transport, targets, endpoints, executor='threading',
-        allow_requeue=allow_requeue,
-        batch_size=batch_size, batch_timeout=batch_timeout)
-
-
-def get_notifier(transport, publisher_id):
-    """Return a configured oslo_messaging notifier."""
-    notifier = oslo_messaging.Notifier(transport, serializer=_SERIALIZER)
-    return notifier.prepare(publisher_id=publisher_id)
diff --git a/aodh/notifier/__init__.py b/aodh/notifier/__init__.py
deleted file mode 100644
index dd2ae865..00000000
--- a/aodh/notifier/__init__.py
+++ /dev/null
@@ -1,164 +0,0 @@
-#
-# Copyright 2013-2015 eNovance
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc - -import cotyledon -from oslo_config import cfg -from oslo_log import log -import oslo_messaging -from oslo_utils import netutils -import six -from stevedore import extension - -from aodh import messaging - - -LOG = log.getLogger(__name__) - -OPTS = [ - cfg.IntOpt('batch_size', - default=1, - help='Number of notification messages to wait before ' - 'dispatching them.'), - cfg.IntOpt('batch_timeout', - help='Number of seconds to wait before dispatching samples ' - 'when batch_size is not reached (None means indefinitely).' - ), -] - - -@six.add_metaclass(abc.ABCMeta) -class AlarmNotifier(object): - """Base class for alarm notifier plugins.""" - - @staticmethod - def __init__(conf): - pass - - @abc.abstractmethod - def notify(self, action, alarm_id, alarm_name, severity, previous, - current, reason, reason_data): - """Notify that an alarm has been triggered. - - :param action: The action that is being attended, as a parsed URL. - :param alarm_id: The triggered alarm. - :param alarm_name: The name of triggered alarm. - :param severity: The level of triggered alarm - :param previous: The previous state of the alarm. - :param current: The current state of the alarm. - :param reason: The reason the alarm changed its state. - :param reason_data: A dict representation of the reason. - """ - - -class AlarmNotifierService(cotyledon.Service): - NOTIFIER_EXTENSIONS_NAMESPACE = "aodh.notifier" - - def __init__(self, worker_id, conf): - super(AlarmNotifierService, self).__init__(worker_id) - self.conf = conf - transport = messaging.get_transport(self.conf) - self.notifiers = extension.ExtensionManager( - self.NOTIFIER_EXTENSIONS_NAMESPACE, - invoke_on_load=True, - invoke_args=(self.conf,)) - - target = oslo_messaging.Target(topic=self.conf.notifier_topic) - self.listener = messaging.get_batch_notification_listener( - transport, [target], [AlarmEndpoint(self.notifiers)], False, - self.conf.notifier.batch_size, self.conf.notifier.batch_timeout) - self.listener.start() - - def terminate(self): - self.listener.stop() - self.listener.wait() - - -class AlarmEndpoint(object): - - def __init__(self, notifiers): - self.notifiers = notifiers - - def sample(self, notifications): - """Endpoint for alarm notifications""" - LOG.debug('Received %s messages in batch.', len(notifications)) - for notification in notifications: - self._process_alarm(self.notifiers, notification['payload']) - - @staticmethod - def _handle_action(notifiers, action, alarm_id, alarm_name, severity, - previous, current, reason, reason_data): - """Process action on alarm - - :param notifiers: list of possible notifiers. - :param action: The action that is being attended, as a parsed URL. - :param alarm_id: The triggered alarm. - :param alarm_name: The name of triggered alarm. - :param severity: The level of triggered alarm - :param previous: The previous state of the alarm. - :param current: The current state of the alarm. - :param reason: The reason the alarm changed its state. - :param reason_data: A dict representation of the reason. 
- """ - - try: - action = netutils.urlsplit(action) - except Exception: - LOG.error( - ("Unable to parse action %(action)s for alarm " - "%(alarm_id)s"), {'action': action, 'alarm_id': alarm_id}) - return - - try: - notifier = notifiers[action.scheme].obj - except KeyError: - scheme = action.scheme - LOG.error( - ("Action %(scheme)s for alarm %(alarm_id)s is unknown, " - "cannot notify"), - {'scheme': scheme, 'alarm_id': alarm_id}) - return - - try: - LOG.debug("Notifying alarm %(id)s with action %(act)s", - {'id': alarm_id, 'act': action}) - notifier.notify(action, alarm_id, alarm_name, severity, - previous, current, reason, reason_data) - except Exception: - LOG.exception("Unable to notify alarm %s", alarm_id) - - @staticmethod - def _process_alarm(notifiers, data): - """Notify that alarm has been triggered. - - :param notifiers: list of possible notifiers - :param data: (dict): alarm data - """ - - actions = data.get('actions') - if not actions: - LOG.error("Unable to notify for an alarm with no action") - return - - for action in actions: - AlarmEndpoint._handle_action(notifiers, action, - data.get('alarm_id'), - data.get('alarm_name'), - data.get('severity'), - data.get('previous'), - data.get('current'), - data.get('reason'), - data.get('reason_data')) diff --git a/aodh/notifier/log.py b/aodh/notifier/log.py deleted file mode 100644 index bfc215dd..00000000 --- a/aodh/notifier/log.py +++ /dev/null @@ -1,40 +0,0 @@ -# -# Copyright 2013 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Log alarm notifier.""" - -from oslo_log import log - -from aodh.i18n import _ -from aodh import notifier - -LOG = log.getLogger(__name__) - - -class LogAlarmNotifier(notifier.AlarmNotifier): - "Log alarm notifier.""" - - @staticmethod - def notify(action, alarm_id, alarm_name, severity, previous, current, - reason, reason_data): - LOG.info(_( - "Notifying alarm %(alarm_name)s %(alarm_id)s of %(severity)s " - "priority from %(previous)s to %(current)s with action %(action)s" - " because %(reason)s.") % ({'alarm_name': alarm_name, - 'alarm_id': alarm_id, - 'severity': severity, - 'previous': previous, - 'current': current, - 'action': action.geturl(), - 'reason': reason})) diff --git a/aodh/notifier/rest.py b/aodh/notifier/rest.py deleted file mode 100644 index 01da584e..00000000 --- a/aodh/notifier/rest.py +++ /dev/null @@ -1,109 +0,0 @@ -# -# Copyright 2013-2014 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-"""Rest alarm notifier.""" - -from oslo_config import cfg -from oslo_log import log -from oslo_serialization import jsonutils -from oslo_utils import uuidutils -import requests -import six.moves.urllib.parse as urlparse - -from aodh import notifier - -LOG = log.getLogger(__name__) - -OPTS = [ - cfg.StrOpt('rest_notifier_certificate_file', - default='', - help='SSL Client certificate file for REST notifier.' - ), - cfg.StrOpt('rest_notifier_certificate_key', - default='', - help='SSL Client private key file for REST notifier.' - ), - cfg.StrOpt('rest_notifier_ca_bundle_certificate_path', - help='SSL CA_BUNDLE certificate for REST notifier', - ), - cfg.BoolOpt('rest_notifier_ssl_verify', - default=True, - help='Whether to verify the SSL Server certificate when ' - 'calling alarm action.' - ), - cfg.IntOpt('rest_notifier_max_retries', - default=0, - help='Number of retries for REST notifier', - ), - -] - - -class RestAlarmNotifier(notifier.AlarmNotifier): - """Rest alarm notifier.""" - - def __init__(self, conf): - super(RestAlarmNotifier, self).__init__(conf) - self.conf = conf - - def notify(self, action, alarm_id, alarm_name, severity, previous, - current, reason, reason_data, headers=None): - headers = headers or {} - if 'x-openstack-request-id' not in headers: - headers['x-openstack-request-id'] = b'req-' + \ - uuidutils.generate_uuid().encode('ascii') - - LOG.info( - "Notifying alarm %(alarm_name)s %(alarm_id)s with severity" - " %(severity)s from %(previous)s to %(current)s with action " - "%(action)s because %(reason)s. request-id: %(request_id)s " % - ({'alarm_name': alarm_name, 'alarm_id': alarm_id, - 'severity': severity, 'previous': previous, - 'current': current, 'action': action, 'reason': reason, - 'request_id': headers['x-openstack-request-id']})) - body = {'alarm_name': alarm_name, 'alarm_id': alarm_id, - 'severity': severity, 'previous': previous, - 'current': current, 'reason': reason, - 'reason_data': reason_data} - headers['content-type'] = 'application/json' - kwargs = {'data': jsonutils.dumps(body), - 'headers': headers} - - if action.scheme == 'https': - default_verify = int(self.conf.rest_notifier_ssl_verify) - options = urlparse.parse_qs(action.query) - verify = bool(int(options.get('aodh-alarm-ssl-verify', - [default_verify])[-1])) - if verify and self.conf.rest_notifier_ca_bundle_certificate_path: - verify = self.conf.rest_notifier_ca_bundle_certificate_path - kwargs['verify'] = verify - - cert = self.conf.rest_notifier_certificate_file - key = self.conf.rest_notifier_certificate_key - if cert: - kwargs['cert'] = (cert, key) if key else cert - - # FIXME(rhonjo): Retries are automatically done by urllib3 in requests - # library. However, there's no interval between retries in urllib3 - # implementation. It will be better to put some interval between - # retries (future work). 
-        max_retries = self.conf.rest_notifier_max_retries
-        session = requests.Session()
-        session.mount(action.geturl(),
-                      requests.adapters.HTTPAdapter(max_retries=max_retries))
-        resp = session.post(action.geturl(), **kwargs)
-        LOG.info('Notifying alarm <%(id)s> got response: %(status_code)s '
                 '%(reason)s.', {'id': alarm_id,
-                                 'status_code': resp.status_code,
-                                 'reason': resp.reason})
diff --git a/aodh/notifier/test.py b/aodh/notifier/test.py
deleted file mode 100644
index 0a316fd4..00000000
--- a/aodh/notifier/test.py
+++ /dev/null
@@ -1,36 +0,0 @@
-#
-# Copyright 2013-2015 eNovance
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Test alarm notifier."""
-
-from aodh import notifier
-
-
-class TestAlarmNotifier(notifier.AlarmNotifier):
-    """Test alarm notifier."""
-
-    def __init__(self, conf):
-        super(TestAlarmNotifier, self).__init__(conf)
-        self.notifications = []
-
-    def notify(self, action, alarm_id, alarm_name, severity,
-               previous, current, reason, reason_data):
-        self.notifications.append((action,
-                                   alarm_id,
-                                   alarm_name,
-                                   severity,
-                                   previous,
-                                   current,
-                                   reason,
-                                   reason_data))
diff --git a/aodh/notifier/trust.py b/aodh/notifier/trust.py
deleted file mode 100644
index 0cf24c34..00000000
--- a/aodh/notifier/trust.py
+++ /dev/null
@@ -1,59 +0,0 @@
-#
-# Copyright 2014 eNovance
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Rest alarm notifier with trusted authentication."""
-
-from six.moves.urllib import parse
-
-from aodh import keystone_client
-from aodh.notifier import rest
-
-
-class TrustAlarmNotifierMixin(object):
-    """Mixin class to add Keystone trust support to an AlarmNotifier.
-
-    Provides a notify() method that interprets the trust ID and then calls
-    the parent class's notify(), passing the necessary authentication data in
-    the headers.
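    For example (an illustrative case, not from the original docstring):
    given the action ``trust+http://f00d@example.com/alert``, the trust ID
    ``f00d`` is read from the username field, the ``trust+`` prefix and the
    fake user are stripped, and the request is posted to
    ``http://example.com/alert`` with an ``X-Auth-Token`` header obtained
    through that trust.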
- """ - - def notify(self, action, alarm_id, alarm_name, severity, previous, current, - reason, reason_data): - trust_id = action.username - - client = keystone_client.get_trusted_client(self.conf, trust_id) - - # Remove the fake user - netloc = action.netloc.split("@")[1] - # Remove the trust prefix - scheme = action.scheme[6:] - - action = parse.SplitResult(scheme, netloc, action.path, action.query, - action.fragment) - - headers = {'X-Auth-Token': keystone_client.get_auth_token(client)} - super(TrustAlarmNotifierMixin, self).notify( - action, alarm_id, alarm_name, severity, previous, current, reason, - reason_data, headers) - - -class TrustRestAlarmNotifier(TrustAlarmNotifierMixin, rest.RestAlarmNotifier): - """Notifier supporting keystone trust authentication. - - This alarm notifier is intended to be used to call an endpoint using - keystone authentication. It uses the aodh service user to - authenticate using the trust ID provided. - - The URL must be in the form ``trust+http://trust-id@host/action``. - """ diff --git a/aodh/notifier/zaqar.py b/aodh/notifier/zaqar.py deleted file mode 100644 index e3a30a9a..00000000 --- a/aodh/notifier/zaqar.py +++ /dev/null @@ -1,227 +0,0 @@ -# -# Copyright 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Zaqar alarm notifier.""" - -from oslo_config import cfg -from oslo_log import log -import six.moves.urllib.parse as urlparse - -from aodh import keystone_client -from aodh import notifier -from aodh.notifier import trust - -LOG = log.getLogger(__name__) - - -SERVICE_OPTS = [ - cfg.StrOpt('zaqar', - default='messaging', - help='Message queue service type.'), -] - - -class ZaqarAlarmNotifier(notifier.AlarmNotifier): - """Zaqar notifier. - - This notifier posts alarm notifications either to a Zaqar subscription or - to an existing Zaqar queue with a pre-signed URL. - - To create a new subscription in the service project, use a notification URL - of the form:: - - zaqar://?topic=example&subscriber=mailto%3A//test%40example.com&ttl=3600 - - Multiple subscribers are allowed. ``ttl`` is the time to live of the - subscription. The queue will be created automatically, in the service - project, with a name based on the topic and the alarm ID. 
-
-    To use a pre-signed URL for an existing queue, use a notification URL
-    with the scheme ``zaqar://`` and the pre-signing data from Zaqar in the
-    query string::
-
-        zaqar://?queue_name=example&project_id=foo&
-            paths=/messages&methods=POST&expires=1970-01-01T00:00Z&
-            signature=abcdefg
-    """
-
-    def __init__(self, conf):
-        super(ZaqarAlarmNotifier, self).__init__(conf)
-        self.conf = conf
-        self._zclient = None
-        self._zendpoint = None
-
-    def _get_endpoint(self):
-        if self._zendpoint is None:
-            try:
-                ks_client = keystone_client.get_client(self.conf)
-                z_srv = ks_client.services.find(
-                    type=self.conf.service_types.zaqar)
-                endpoint_type = self.conf.service_credentials.interface
-                z_endpoint = ks_client.endpoints.find(service_id=z_srv.id,
-                                                      interface=endpoint_type)
-                self._zendpoint = z_endpoint.url
-            except Exception:
-                LOG.error("Aodh was configured to use zaqar:// action,"
-                          " but Zaqar endpoint could not be found in"
-                          " Keystone service catalog.")
-        return self._zendpoint
-
-    def _get_client_conf(self):
-        conf = self.conf.service_credentials
-        return {
-            'auth_opts': {
-                'backend': 'keystone',
-                'options': {
-                    'os_username': conf.os_username,
-                    'os_password': conf.os_password,
-                    'os_project_name': conf.os_tenant_name,
-                    'os_auth_url': conf.os_auth_url,
-                    'insecure': ''
-                }
-            }
-        }
-
-    def get_zaqar_client(self, conf):
-        try:
-            from zaqarclient.queues import client as zaqar_client
-            return zaqar_client.Client(self._get_endpoint(),
-                                       version=2, conf=conf)
-        except Exception:
-            LOG.error("Failed to connect to Zaqar service",
-                      exc_info=True)
-
-    def _get_presigned_client_conf(self, queue_info):
-        queue_name = queue_info.get('queue_name', [''])[0]
-        if not queue_name:
-            return None, None
-
-        signature = queue_info.get('signature', [''])[0]
-        expires = queue_info.get('expires', [''])[0]
-        paths = queue_info.get('paths', [''])[0].split(',')
-        methods = queue_info.get('methods', [''])[0].split(',')
-        project_id = queue_info.get('project_id', [''])[0]
-        conf = {
-            'auth_opts': {
-                'backend': 'signed-url',
-                'options': {
-                    'signature': signature,
-                    'expires': expires,
-                    'methods': methods,
-                    'paths': paths,
-                    'os_project_id': project_id
-                }
-            }
-        }
-        return conf, queue_name
-
-    def notify(self, action, alarm_id, alarm_name, severity, previous,
-               current, reason, reason_data, headers=None):
-        LOG.info(
-            "Notifying alarm %(alarm_name)s %(alarm_id)s of %(severity)s "
-            "priority from %(previous)s to %(current)s with action %(action)s"
-            " because %(reason)s." % ({'alarm_name': alarm_name,
-                                       'alarm_id': alarm_id,
-                                       'severity': severity,
-                                       'previous': previous,
-                                       'current': current,
-                                       'action': action,
-                                       'reason': reason}))
-        body = {'alarm_name': alarm_name, 'alarm_id': alarm_id,
-                'severity': severity, 'previous': previous,
-                'current': current, 'reason': reason,
-                'reason_data': reason_data}
-        message = dict(body=body)
-        self.notify_zaqar(action, message, headers)
-
-    @property
-    def client(self):
-        if self._zclient is None:
-            self._zclient = self.get_zaqar_client(self._get_client_conf())
-        return self._zclient
-
-    def notify_zaqar(self, action, message, headers=None):
-        queue_info = urlparse.parse_qs(action.query)
-        try:
-            # NOTE(flwang): Try to build a pre-signed client if the user has
-            # provided enough information for that. Otherwise, build a
-            # client with the service account and a queue name derived from
-            # this alarm.
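# An illustrative example (not in the original file) of the two branches
# described above: urlparse.parse_qs() maps the query string to lists,
# which _get_presigned_client_conf() unpacks, e.g.
#
#     urlparse.parse_qs("queue_name=example&signature=abc&expires=1970-01-01T00:00Z")
#     # -> {'queue_name': ['example'], 'signature': ['abc'],
#     #     'expires': ['1970-01-01T00:00Z']}
#
# When 'queue_name' is absent the first branch returns (None, None) and
# the service-account client is used instead.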
-            conf, queue_name = self._get_presigned_client_conf(queue_info)
-            if conf is not None:
-                zaqar_client = self.get_zaqar_client(conf)
-
-            if conf is None or queue_name is None or zaqar_client is None:
-                zaqar_client = self.client
-                # queue_name is a combination of <alarm_id>-<topic>
-                queue_name = "%s-%s" % (message['body']['alarm_id'],
-                                        queue_info.get('topic')[-1])
-
-            # create a queue in zaqar
-            queue = zaqar_client.queue(queue_name)
-
-            subscriber_list = queue_info.get('subscriber', [])
-            ttl = int(queue_info.get('ttl', ['3600'])[-1])
-            for subscriber in subscriber_list:
-                # add subscriber to the zaqar queue
-                subscription_data = dict(subscriber=subscriber,
-                                         ttl=ttl)
-                zaqar_client.subscription(queue_name, **subscription_data)
-            # post the message to the queue
-            queue.post(message)
-        except (IndexError, TypeError):
-            # a missing query option yields None, hence TypeError as well
-            LOG.error("Required query option missing in action %s",
-                      action)
-        except Exception:
-            LOG.error("Unknown error occurred; failed to post message to"
-                      " Zaqar queue",
-                      exc_info=True)
-
-
-class TrustZaqarAlarmNotifier(trust.TrustAlarmNotifierMixin,
-                              ZaqarAlarmNotifier):
-    """Zaqar notifier using a Keystone trust to post to user-defined queues.
-
-    The URL must be in the form ``trust+zaqar://trust_id@?queue_name=example``.
-    """
-
-    def _get_client_conf(self, auth_token):
-        return {
-            'auth_opts': {
-                'backend': 'keystone',
-                'options': {
-                    'os_auth_token': auth_token,
-                }
-            }
-        }
-
-    def notify_zaqar(self, action, message, headers):
-        queue_info = urlparse.parse_qs(action.query)
-        try:
-            queue_name = queue_info.get('queue_name')[-1]
-        except (IndexError, TypeError):
-            # a missing 'queue_name' yields None, hence TypeError as well
-            LOG.error("Required 'queue_name' query option missing in"
-                      " action %s",
-                      action)
-            return
-
-        try:
-            conf = self._get_client_conf(headers['X-Auth-Token'])
-            client = self.get_zaqar_client(conf)
-            queue = client.queue(queue_name)
-            queue.post(message)
-        except Exception:
-            LOG.error("Unknown error occurred; failed to post message to"
-                      " Zaqar queue",
-                      exc_info=True)
diff --git a/aodh/opts.py b/aodh/opts.py
deleted file mode 100644
index 2f434e6f..00000000
--- a/aodh/opts.py
+++ /dev/null
@@ -1,66 +0,0 @@
-# Copyright 2014-2015 eNovance
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-import itertools - -from keystoneauth1 import loading - -import aodh.api -import aodh.api.controllers.v2.alarm_rules.gnocchi -import aodh.api.controllers.v2.alarms -import aodh.coordination -import aodh.evaluator -import aodh.evaluator.event -import aodh.evaluator.gnocchi -import aodh.event -import aodh.keystone_client -import aodh.notifier.rest -import aodh.notifier.zaqar -import aodh.service -import aodh.storage - - -def list_opts(): - return [ - ('DEFAULT', - itertools.chain( - aodh.evaluator.OPTS, - aodh.evaluator.event.OPTS, - aodh.evaluator.threshold.OPTS, - aodh.notifier.rest.OPTS, - aodh.queue.OPTS, - aodh.service.OPTS)), - ('api', - itertools.chain( - aodh.api.OPTS, - aodh.api.controllers.v2.alarm_rules.gnocchi.GNOCCHI_OPTS, - aodh.api.controllers.v2.alarms.ALARM_API_OPTS)), - ('coordination', aodh.coordination.OPTS), - ('database', aodh.storage.OPTS), - ('evaluator', aodh.service.EVALUATOR_OPTS), - ('listener', itertools.chain(aodh.service.LISTENER_OPTS, - aodh.event.OPTS)), - ('notifier', aodh.service.NOTIFIER_OPTS), - ('service_credentials', aodh.keystone_client.OPTS), - ('service_types', aodh.notifier.zaqar.SERVICE_OPTS), - ('notifier', aodh.notifier.OPTS), - ] - - -def list_keystoneauth_opts(): - # NOTE(sileht): the configuration file contains only the options - # for the password plugin that handles keystone v2 and v3 API - # with discovery. But other options are possible. - return [('service_credentials', ( - loading.get_auth_common_conf_options() + - loading.get_auth_plugin_conf_options('password')))] diff --git a/aodh/queue.py b/aodh/queue.py deleted file mode 100644 index 2a4db881..00000000 --- a/aodh/queue.py +++ /dev/null @@ -1,58 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_config import cfg -from oslo_log import log -import oslo_messaging -import six - -from aodh import messaging -from aodh.storage import models - -OPTS = [ - cfg.StrOpt('notifier_topic', - default='alarming', - help='The topic that aodh uses for alarm notifier ' - 'messages.'), -] - -LOG = log.getLogger(__name__) - - -class AlarmNotifier(object): - def __init__(self, conf): - self.notifier = oslo_messaging.Notifier( - messaging.get_transport(conf), - driver='messagingv2', - publisher_id="alarming.evaluator", - topics=[conf.notifier_topic]) - - def notify(self, alarm, previous, reason, reason_data): - actions = getattr(alarm, models.Alarm.ALARM_ACTIONS_MAP[alarm.state]) - if not actions: - LOG.debug('alarm %(alarm_id)s has no action configured ' - 'for state transition from %(previous)s to ' - 'state %(state)s, skipping the notification.', - {'alarm_id': alarm.alarm_id, - 'previous': previous, - 'state': alarm.state}) - return - payload = {'actions': actions, - 'alarm_id': alarm.alarm_id, - 'alarm_name': alarm.name, - 'severity': alarm.severity, - 'previous': previous, - 'current': alarm.state, - 'reason': six.text_type(reason), - 'reason_data': reason_data} - self.notifier.sample({}, 'alarm.update', payload) diff --git a/aodh/service.py b/aodh/service.py deleted file mode 100644 index 5428aa88..00000000 --- a/aodh/service.py +++ /dev/null @@ -1,92 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2013-2017 Red Hat, Inc -# Copyright 2012-2015 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import os - -from keystoneauth1 import loading as ka_loading -from oslo_config import cfg -from oslo_db import options as db_options -import oslo_i18n -from oslo_log import log -from oslo_policy import opts as policy_opts - -from aodh.conf import defaults -from aodh import keystone_client -from aodh import messaging - - -OPTS = [ - cfg.IntOpt('http_timeout', - default=600, - help='Timeout seconds for HTTP requests. Set it to None to ' - 'disable timeout.'), - cfg.IntOpt('evaluation_interval', - default=60, - help='Period of evaluation cycle, should' - ' be >= than configured pipeline interval for' - ' collection of underlying meters.'), -] - -EVALUATOR_OPTS = [ - cfg.IntOpt('workers', - default=1, - min=1, - help='Number of workers for evaluator service. ' - 'default value is 1.') -] - -NOTIFIER_OPTS = [ - cfg.IntOpt('workers', - default=1, - min=1, - help='Number of workers for notifier service. ' - 'default value is 1.') -] - -LISTENER_OPTS = [ - cfg.IntOpt('workers', - default=1, - min=1, - help='Number of workers for listener service. 
' - 'default value is 1.') -] - - -def prepare_service(argv=None, config_files=None): - conf = cfg.ConfigOpts() - oslo_i18n.enable_lazy() - log.register_options(conf) - log_levels = (conf.default_log_levels + - ['futurist=INFO', 'keystoneclient=INFO']) - log.set_defaults(default_log_levels=log_levels) - defaults.set_cors_middleware_defaults() - db_options.set_defaults(conf) - policy_opts.set_defaults(conf, policy_file=os.path.abspath( - os.path.join(os.path.dirname(__file__), "api", "policy.json"))) - from aodh import opts - # Register our own Aodh options - for group, options in opts.list_opts(): - conf.register_opts(list(options), - group=None if group == "DEFAULT" else group) - keystone_client.register_keystoneauth_opts(conf) - - conf(argv, project='aodh', validate_default_values=True, - default_config_files=config_files) - - ka_loading.load_auth_from_conf_options(conf, "service_credentials") - log.setup(conf, 'aodh') - messaging.setup() - return conf diff --git a/aodh/storage/__init__.py b/aodh/storage/__init__.py deleted file mode 100644 index 4d11aecd..00000000 --- a/aodh/storage/__init__.py +++ /dev/null @@ -1,139 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Storage backend management -""" -import datetime - -from oslo_config import cfg -from oslo_log import log -from oslo_utils import timeutils -import six.moves.urllib.parse as urlparse -from stevedore import driver -import tenacity - -_NAMESPACE = 'aodh.storage' - - -LOG = log.getLogger(__name__) - - -OPTS = [ - cfg.IntOpt('alarm_history_time_to_live', - default=-1, - help=("Number of seconds that alarm histories are kept " - "in the database for (<= 0 means forever).")), -] - - -class StorageBadVersion(Exception): - """Error raised when the storage backend version is not good enough.""" - - -class AlarmNotFound(Exception): - """Error raised when the needed resource not found.""" - - def __init__(self, alarm_id): - self.alarm_id = alarm_id - super(AlarmNotFound, self).__init__("Alarm %s not found" % alarm_id) - - -class InvalidMarker(Exception): - """Invalid pagination marker parameters""" - - -def get_connection_from_config(conf): - retries = conf.database.max_retries - url = conf.database.connection - connection_scheme = urlparse.urlparse(url).scheme - LOG.debug('looking for %(name)r driver in %(namespace)r', - {'name': connection_scheme, 'namespace': _NAMESPACE}) - mgr = driver.DriverManager(_NAMESPACE, connection_scheme) - - @tenacity.retry( - wait=tenacity.wait_fixed(conf.database.retry_interval), - stop=tenacity.stop_after_attempt(retries if retries >= 0 else 5), - reraise=True) - def _get_connection(): - """Return an open connection to the database.""" - return mgr.driver(conf, url) - - return _get_connection() - - -class SampleFilter(object): - """Holds the properties for building a query from a meter/sample filter. - - :param user: The sample owner. - :param project: The sample project. - :param start_timestamp: Earliest time point in the request. 
-    :param start_timestamp_op: Earliest timestamp operation in the request.
-    :param end_timestamp: Latest time point in the request.
-    :param end_timestamp_op: Latest timestamp operation in the request.
-    :param resource: Optional filter for resource id.
-    :param meter: Optional filter for meter type using the meter name.
-    :param source: Optional source filter.
-    :param message_id: Optional sample_id filter.
-    :param metaquery: Optional filter on the metadata
-    """
-    def __init__(self, user=None, project=None,
-                 start_timestamp=None, start_timestamp_op=None,
-                 end_timestamp=None, end_timestamp_op=None,
-                 resource=None, meter=None,
-                 source=None, message_id=None,
-                 metaquery=None):
-        self.user = user
-        self.project = project
-        self.start_timestamp = self.sanitize_timestamp(start_timestamp)
-        self.start_timestamp_op = start_timestamp_op
-        self.end_timestamp = self.sanitize_timestamp(end_timestamp)
-        self.end_timestamp_op = end_timestamp_op
-        self.resource = resource
-        self.meter = meter
-        self.source = source
-        self.metaquery = metaquery or {}
-        self.message_id = message_id
-
-    @staticmethod
-    def sanitize_timestamp(timestamp):
-        """Return a naive UTC datetime object."""
-        if not timestamp:
-            return timestamp
-        if not isinstance(timestamp, datetime.datetime):
-            timestamp = timeutils.parse_isotime(timestamp)
-        return timeutils.normalize_time(timestamp)
-
-    def __repr__(self):
-        return ("<SampleFilter(user: %s,"
-                " project: %s,"
-                " start timestamp: %s,"
-                " start timestamp op: %s,"
-                " end timestamp: %s,"
-                " end timestamp op: %s,"
-                " resource: %s,"
-                " meter: %s,"
-                " source: %s,"
-                " metaquery: %s,"
-                " message id: %s)>" %
                (self.user,
-                 self.project,
-                 self.start_timestamp,
-                 self.start_timestamp_op,
-                 self.end_timestamp,
-                 self.end_timestamp_op,
-                 self.resource,
-                 self.meter,
-                 self.source,
-                 self.metaquery,
-                 self.message_id))
diff --git a/aodh/storage/base.py b/aodh/storage/base.py
deleted file mode 100644
index f0ad8612..00000000
--- a/aodh/storage/base.py
+++ /dev/null
@@ -1,221 +0,0 @@
-#
-# Copyright 2012 New Dream Network, LLC (DreamHost)
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Base classes for storage engines
-"""
-import copy
-import inspect
-
-import six
-
-import aodh
-
-
-def update_nested(original_dict, updates):
-    """Updates the leaf nodes in a nested dict.
-
-    Updates occur without replacing entire sub-dicts.
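    A worked example (illustrative, not from the original docstring):

        update_nested({'a': {'b': 1}}, {'a': {'c': 2}})
        # -> {'a': {'b': 1, 'c': 2}}: the sub-dict is merged, not replaced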
- """ - dict_to_update = copy.deepcopy(original_dict) - for key, value in six.iteritems(updates): - if isinstance(value, dict): - sub_dict = update_nested(dict_to_update.get(key, {}), value) - dict_to_update[key] = sub_dict - else: - dict_to_update[key] = updates[key] - return dict_to_update - - -class Model(object): - """Base class for storage API models.""" - - def __init__(self, **kwds): - self.fields = list(kwds) - for k, v in six.iteritems(kwds): - setattr(self, k, v) - - def as_dict(self): - d = {} - for f in self.fields: - v = getattr(self, f) - if isinstance(v, Model): - v = v.as_dict() - elif isinstance(v, list) and v and isinstance(v[0], Model): - v = [sub.as_dict() for sub in v] - d[f] = v - return d - - def __eq__(self, other): - return self.as_dict() == other.as_dict() - - def __ne__(self, other): - return not self.__eq__(other) - - @classmethod - def get_field_names(cls): - fields = inspect.getargspec(cls.__init__)[0] - return set(fields) - set(["self"]) - - -class Connection(object): - """Base class for alarm storage system connections.""" - - # A dictionary representing the capabilities of this driver. - CAPABILITIES = { - 'alarms': {'query': {'simple': False, - 'complex': False}, - 'history': {'query': {'simple': False, - 'complex': False}}}, - } - - STORAGE_CAPABILITIES = { - 'storage': {'production_ready': False}, - } - - def __init__(self, conf, url): - pass - - @staticmethod - def upgrade(): - """Migrate the database to `version` or the most recent version.""" - - @staticmethod - def get_alarms(name=None, user=None, state=None, meter=None, - project=None, enabled=None, alarm_id=None, - alarm_type=None, severity=None, exclude=None, - pagination=None): - """Yields a lists of alarms that match filters. - - :param name: Optional name for alarm. - :param user: Optional ID for user that owns the resource. - :param state: Optional string for alarm state. - :param meter: Optional string for alarms associated with meter. - :param project: Optional ID for project that owns the resource. - :param enabled: Optional boolean to list disable alarm. - :param alarm_id: Optional alarm_id to return one alarm. - :param alarm_type: Optional alarm type. - :param severity: Optional alarm severity. - :param exclude: Optional dict for inequality constraint. - :param pagination: Pagination parameters. - """ - raise aodh.NotImplementedError('Alarms not implemented') - - @staticmethod - def create_alarm(alarm): - """Create an alarm. Returns the alarm as created. - - :param alarm: The alarm to create. - """ - raise aodh.NotImplementedError('Alarms not implemented') - - @staticmethod - def update_alarm(alarm): - """Update alarm.""" - raise aodh.NotImplementedError('Alarms not implemented') - - @staticmethod - def delete_alarm(alarm_id): - """Delete an alarm and its history data.""" - raise aodh.NotImplementedError('Alarms not implemented') - - @staticmethod - def get_alarm_changes(alarm_id, on_behalf_of, - user=None, project=None, alarm_type=None, - severity=None, start_timestamp=None, - start_timestamp_op=None, end_timestamp=None, - end_timestamp_op=None, pagination=None): - """Yields list of AlarmChanges describing alarm history - - Changes are always sorted in reverse order of occurrence, given - the importance of currency. - - Segregation for non-administrative users is done on the basis - of the on_behalf_of parameter. 
-        This allows such users to have visibility on both the changes
-        initiated by themselves directly (generally creation, rule changes,
-        or deletion) and also on those changes initiated on their behalf by
-        the alarming service (state transitions after alarm thresholds are
-        crossed).
-
-        :param alarm_id: ID of alarm to return changes for
-        :param on_behalf_of: ID of tenant to scope changes query (None for
-                             administrative user, indicating all projects)
-        :param user: Optional ID of user to return changes for
-        :param project: Optional ID of project to return changes for
-        :param alarm_type: Optional change type
-        :param severity: Optional change severity
-        :param start_timestamp: Optional modified timestamp start range
-        :param start_timestamp_op: Optional timestamp start range operation
-        :param end_timestamp: Optional modified timestamp end range
-        :param end_timestamp_op: Optional timestamp end range operation
-        :param pagination: Pagination parameters.
-        """
-        raise aodh.NotImplementedError('Alarm history not implemented')
-
-    @staticmethod
-    def record_alarm_change(alarm_change):
-        """Record alarm change event."""
-        raise aodh.NotImplementedError('Alarm history not implemented')
-
-    @staticmethod
-    def clear():
-        """Clear database."""
-
-    @staticmethod
-    def query_alarms(filter_expr=None, orderby=None, limit=None):
-        """Return an iterable of model.Alarm objects.
-
-        :param filter_expr: Filter expression for query.
-        :param orderby: List of field name and direction pairs for order by.
-        :param limit: Maximum number of results to return.
-        """
-
-        raise aodh.NotImplementedError('Complex query for alarms '
-                                       'is not implemented.')
-
-    @staticmethod
-    def query_alarm_history(filter_expr=None, orderby=None, limit=None):
-        """Return an iterable of model.AlarmChange objects.
-
-        :param filter_expr: Filter expression for query.
-        :param orderby: List of field name and direction pairs for order by.
-        :param limit: Maximum number of results to return.
-        """
-
-        raise aodh.NotImplementedError('Complex query for alarms '
-                                       'history is not implemented.')
-
-    @classmethod
-    def get_capabilities(cls):
-        """Return a dictionary with the capabilities of each driver."""
-        return cls.CAPABILITIES
-
-    @classmethod
-    def get_storage_capabilities(cls):
-        """Return a dictionary representing the performance capabilities.
-
-        This is needed to evaluate the performance of each driver.
-        """
-        return cls.STORAGE_CAPABILITIES
-
-    @staticmethod
-    def clear_expired_alarm_history_data(alarm_history_ttl):
-        """Clear expired alarm history data from the backend storage system.
-
-        Clearing occurs according to the time-to-live.
-
-        :param alarm_history_ttl: Number of seconds to keep alarm history
-                                  records for.
-        """
-        raise aodh.NotImplementedError('Clearing alarm history '
-                                       'not implemented')
diff --git a/aodh/storage/impl_log.py b/aodh/storage/impl_log.py
deleted file mode 100644
index da578b98..00000000
--- a/aodh/storage/impl_log.py
+++ /dev/null
@@ -1,68 +0,0 @@
-#
-# Copyright 2012 New Dream Network, LLC (DreamHost)
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Simple logging storage backend.
-"""
-
-from oslo_log import log
-
-from aodh.storage import base
-
-LOG = log.getLogger(__name__)
-
-
-class Connection(base.Connection):
-    """Log the data."""
-
-    @staticmethod
-    def upgrade():
-        pass
-
-    @staticmethod
-    def clear():
-        pass
-
-    @staticmethod
-    def get_alarms(name=None, user=None, state=None, meter=None,
-                   project=None, enabled=None, alarm_id=None,
-                   alarm_type=None, severity=None, exclude=None,
-                   pagination=None):
-        """Yields a list of alarms that match filters."""
-        return []
-
-    @staticmethod
-    def create_alarm(alarm):
-        """Create alarm."""
-        return alarm
-
-    @staticmethod
-    def update_alarm(alarm):
-        """Update alarm."""
-        return alarm
-
-    @staticmethod
-    def delete_alarm(alarm_id):
-        """Delete an alarm and its history data."""
-
-    @staticmethod
-    def clear_expired_alarm_history_data(alarm_history_ttl):
-        """Clear expired alarm history data from the backend storage system.
-
-        Clearing occurs according to the time-to-live.
-
-        :param alarm_history_ttl: Number of seconds to keep alarm history
-                                  records for.
-        """
-        LOG.info('Dropping alarm history data with TTL %d',
-                 alarm_history_ttl)
diff --git a/aodh/storage/impl_sqlalchemy.py b/aodh/storage/impl_sqlalchemy.py
deleted file mode 100644
index b9c465b6..00000000
--- a/aodh/storage/impl_sqlalchemy.py
+++ /dev/null
@@ -1,407 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""SQLAlchemy storage backend."""
-
-from __future__ import absolute_import
-import copy
-import datetime
-import os.path
-
-from alembic import command
-from alembic import config
-from alembic import migration
-from oslo_db.sqlalchemy import session as db_session
-from oslo_db.sqlalchemy import utils as oslo_sql_utils
-from oslo_log import log
-from oslo_utils import timeutils
-import six
-from sqlalchemy import asc
-from sqlalchemy import desc
-from sqlalchemy.engine import url as sqlalchemy_url
-from sqlalchemy import func
-from sqlalchemy.orm import exc
-
-import aodh
-from aodh import storage
-from aodh.storage import base
-from aodh.storage import models as alarm_api_models
-from aodh.storage.sqlalchemy import models
-from aodh.storage.sqlalchemy import utils as sql_utils
-
-LOG = log.getLogger(__name__)
-
-AVAILABLE_CAPABILITIES = {
-    'alarms': {'query': {'simple': True,
-                         'complex': True},
-               'history': {'query': {'simple': True,
-                                     'complex': True}}},
-}
-
-
-AVAILABLE_STORAGE_CAPABILITIES = {
-    'storage': {'production_ready': True},
-}
-
-
-class Connection(base.Connection):
-    """Put the data into a SQLAlchemy database.
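    (Illustrative note, not from the original docstring: dress_url() below
    rewrites driverless MySQL URLs to use the pymysql driver, e.g. the
    hypothetical ``mysql://user:pw@host/aodh`` becomes
    ``mysql+pymysql://user:pw@host/aodh``; all other URLs pass through
    unchanged.)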
""" - CAPABILITIES = base.update_nested(base.Connection.CAPABILITIES, - AVAILABLE_CAPABILITIES) - STORAGE_CAPABILITIES = base.update_nested( - base.Connection.STORAGE_CAPABILITIES, - AVAILABLE_STORAGE_CAPABILITIES, - ) - - def __init__(self, conf, url): - # Set max_retries to 0, since oslo.db in certain cases may attempt - # to retry making the db connection retried max_retries ^ 2 times - # in failure case and db reconnection has already been implemented - # in storage.__init__.get_connection_from_config function - options = dict(conf.database.items()) - options['max_retries'] = 0 - # oslo.db doesn't support options defined by Aodh - for opt in storage.OPTS: - options.pop(opt.name, None) - self._engine_facade = db_session.EngineFacade(self.dress_url(url), - **options) - self.conf = conf - - @staticmethod - def dress_url(url): - # If no explicit driver has been set, we default to pymysql - if url.startswith("mysql://"): - url = sqlalchemy_url.make_url(url) - url.drivername = "mysql+pymysql" - return str(url) - return url - - def disconnect(self): - self._engine_facade.get_engine().dispose() - - def _get_alembic_config(self): - cfg = config.Config( - "%s/sqlalchemy/alembic/alembic.ini" % os.path.dirname(__file__)) - cfg.set_main_option('sqlalchemy.url', - self.conf.database.connection) - return cfg - - def upgrade(self, nocreate=False): - cfg = self._get_alembic_config() - cfg.conf = self.conf - if nocreate: - command.upgrade(cfg, "head") - else: - engine = self._engine_facade.get_engine() - ctxt = migration.MigrationContext.configure(engine.connect()) - current_version = ctxt.get_current_revision() - if current_version is None: - models.Base.metadata.create_all(engine, checkfirst=False) - command.stamp(cfg, "head") - else: - command.upgrade(cfg, "head") - - def clear(self): - engine = self._engine_facade.get_engine() - for table in reversed(models.Base.metadata.sorted_tables): - engine.execute(table.delete()) - engine.dispose() - - def _retrieve_data(self, filter_expr, orderby, limit, table): - if limit == 0: - return [] - - session = self._engine_facade.get_session() - engine = self._engine_facade.get_engine() - query = session.query(table) - transformer = sql_utils.QueryTransformer(table, query, - dialect=engine.dialect.name) - if filter_expr is not None: - transformer.apply_filter(filter_expr) - - transformer.apply_options(orderby, - limit) - - retrieve = {models.Alarm: self._retrieve_alarms, - models.AlarmChange: self._retrieve_alarm_history} - return retrieve[table](transformer.get_query()) - - @staticmethod - def _row_to_alarm_model(row): - return alarm_api_models.Alarm(alarm_id=row.alarm_id, - enabled=row.enabled, - type=row.type, - name=row.name, - description=row.description, - timestamp=row.timestamp, - user_id=row.user_id, - project_id=row.project_id, - state=row.state, - state_timestamp=row.state_timestamp, - state_reason=row.state_reason, - ok_actions=row.ok_actions, - alarm_actions=row.alarm_actions, - insufficient_data_actions=( - row.insufficient_data_actions), - rule=row.rule, - time_constraints=row.time_constraints, - repeat_actions=row.repeat_actions, - severity=row.severity) - - def _retrieve_alarms(self, query): - return (self._row_to_alarm_model(x) for x in query.all()) - - @staticmethod - def _get_pagination_query(session, query, pagination, api_model, model): - if not pagination.get('sort'): - pagination['sort'] = api_model.DEFAULT_SORT - marker = None - if pagination.get('marker'): - key_attr = getattr(model, api_model.PRIMARY_KEY) - marker_query = copy.copy(query) 
-            marker_query = marker_query.filter(
-                key_attr == pagination['marker'])
-            try:
-                marker = marker_query.limit(1).one()
-            except exc.NoResultFound:
-                raise storage.InvalidMarker(
-                    'Marker %s not found.' % pagination['marker'])
-        limit = pagination.get('limit')
-        # We sort "severity" by its semantics (low < moderate < critical)
-        # rather than alphabetically when it is specified in the sorts.
-        for sort_key, sort_dir in pagination['sort'][::-1]:
-            if sort_key == 'severity':
-                engine = session.connection()
-                if engine.dialect.name != "mysql":
-                    raise aodh.NotImplementedError
-                sort_dir_func = {'asc': asc, 'desc': desc}[sort_dir]
-                query = query.order_by(sort_dir_func(
-                    func.field(getattr(model, sort_key), 'low',
-                               'moderate', 'critical')))
-                pagination['sort'].remove((sort_key, sort_dir))
-
-        sort_keys = [s[0] for s in pagination['sort']]
-        sort_dirs = [s[1] for s in pagination['sort']]
-        return oslo_sql_utils.paginate_query(
-            query, model, limit, sort_keys, sort_dirs=sort_dirs, marker=marker)
-
-    def get_alarms(self, name=None, user=None, state=None, meter=None,
-                   project=None, enabled=None, alarm_id=None,
-                   alarm_type=None, severity=None, exclude=None,
-                   pagination=None):
-        """Yields a list of alarms that match filters.
-
-        :param name: Optional name for alarm.
-        :param user: Optional ID for user that owns the resource.
-        :param state: Optional string for alarm state.
-        :param meter: Optional string for alarms associated with meter.
-        :param project: Optional ID for project that owns the resource.
-        :param enabled: Optional boolean to list disabled alarms.
-        :param alarm_id: Optional alarm_id to return one alarm.
-        :param alarm_type: Optional alarm type.
-        :param severity: Optional alarm severity.
-        :param exclude: Optional dict for inequality constraint.
-        :param pagination: Pagination query parameters.
-        """
-
-        pagination = pagination or {}
-        session = self._engine_facade.get_session()
-        query = session.query(models.Alarm)
-        if name is not None:
-            query = query.filter(models.Alarm.name == name)
-        if enabled is not None:
-            query = query.filter(models.Alarm.enabled == enabled)
-        if user is not None:
-            query = query.filter(models.Alarm.user_id == user)
-        if project is not None:
-            query = query.filter(models.Alarm.project_id == project)
-        if alarm_id is not None:
-            query = query.filter(models.Alarm.alarm_id == alarm_id)
-        if state is not None:
-            query = query.filter(models.Alarm.state == state)
-        if alarm_type is not None:
-            query = query.filter(models.Alarm.type == alarm_type)
-        if severity is not None:
-            query = query.filter(models.Alarm.severity == severity)
-        if exclude is not None:
-            for key, value in six.iteritems(exclude):
-                query = query.filter(getattr(models.Alarm, key) != value)
-
-        query = self._get_pagination_query(
-            session, query, pagination, alarm_api_models.Alarm, models.Alarm)
-        alarms = self._retrieve_alarms(query)
-
-        # TODO(cmart): improve this by using sqlalchemy.func factory
-        if meter is not None:
-            alarms = filter(lambda row:
-                            row.rule.get('meter_name', None) == meter,
-                            alarms)
-
-        return alarms
-
-    def create_alarm(self, alarm):
-        """Create an alarm.
-
-        :param alarm: The alarm to create.
-        """
-        session = self._engine_facade.get_session()
-        with session.begin():
-            alarm_row = models.Alarm(alarm_id=alarm.alarm_id)
-            alarm_row.update(alarm.as_dict())
-            session.add(alarm_row)
-
-        return self._row_to_alarm_model(alarm_row)
-
-    def update_alarm(self, alarm):
-        """Update an alarm.
-
-        :param alarm: the new Alarm to update
-        """
-        session = self._engine_facade.get_session()
-        with session.begin():
-            count = session.query(models.Alarm).filter(
-                models.Alarm.alarm_id == alarm.alarm_id).update(
-                    alarm.as_dict())
-            if not count:
-                raise storage.AlarmNotFound(alarm.alarm_id)
-        return alarm
-
-    def delete_alarm(self, alarm_id):
-        """Delete an alarm and its history data.
-
-        :param alarm_id: ID of the alarm to delete
-        """
-        session = self._engine_facade.get_session()
-        with session.begin():
-            session.query(models.Alarm).filter(
-                models.Alarm.alarm_id == alarm_id).delete()
-            # FIXME(liusheng): we should use delete cascade
-            session.query(models.AlarmChange).filter(
-                models.AlarmChange.alarm_id == alarm_id).delete()
-
-    @staticmethod
-    def _row_to_alarm_change_model(row):
-        return alarm_api_models.AlarmChange(event_id=row.event_id,
-                                            alarm_id=row.alarm_id,
-                                            type=row.type,
-                                            detail=row.detail,
-                                            user_id=row.user_id,
-                                            project_id=row.project_id,
-                                            on_behalf_of=row.on_behalf_of,
-                                            timestamp=row.timestamp)
-
-    def query_alarms(self, filter_expr=None, orderby=None, limit=None):
-        """Yields a list of alarms that match the filter."""
-        return self._retrieve_data(filter_expr, orderby, limit, models.Alarm)
-
-    def _retrieve_alarm_history(self, query):
-        return (self._row_to_alarm_change_model(x) for x in query.all())
-
-    def query_alarm_history(self, filter_expr=None, orderby=None, limit=None):
-        """Return an iterable of model.AlarmChange objects."""
-        return self._retrieve_data(filter_expr,
-                                   orderby,
-                                   limit,
-                                   models.AlarmChange)
-
-    def get_alarm_changes(self, alarm_id, on_behalf_of,
-                          user=None, project=None, alarm_type=None,
-                          severity=None, start_timestamp=None,
-                          start_timestamp_op=None, end_timestamp=None,
-                          end_timestamp_op=None, pagination=None):
-        """Yields a list of AlarmChanges describing alarm history.
-
-        Changes are always sorted in reverse order of occurrence, given
-        the importance of currency.
-
-        Segregation for non-administrative users is done on the basis
-        of the on_behalf_of parameter. This allows such users to have
-        visibility on both the changes initiated by themselves directly
-        (generally creation, rule changes, or deletion) and also on those
-        changes initiated on their behalf by the alarming service (state
-        transitions after alarm thresholds are crossed).
-
-        :param alarm_id: ID of alarm to return changes for
-        :param on_behalf_of: ID of tenant to scope changes query (None for
-                             administrative user, indicating all projects)
-        :param user: Optional ID of user to return changes for
-        :param project: Optional ID of project to return changes for
-        :param alarm_type: Optional change type
-        :param severity: Optional alarm severity
-        :param start_timestamp: Optional modified timestamp start range
-        :param start_timestamp_op: Optional timestamp start range operation
-        :param end_timestamp: Optional modified timestamp end range
-        :param end_timestamp_op: Optional timestamp end range operation
-        :param pagination: Pagination query parameters.
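        For example (an illustrative reading of the filter code that
        follows): ``start_timestamp_op='gt'`` yields a strict
        ``timestamp > start_timestamp`` filter, while any other value
        defaults to ``>=``; symmetrically, ``end_timestamp_op='le'`` yields
        ``timestamp <= end_timestamp`` and anything else the strict ``<``.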
- """ - pagination = pagination or {} - session = self._engine_facade.get_session() - query = session.query(models.AlarmChange) - query = query.filter(models.AlarmChange.alarm_id == alarm_id) - - if on_behalf_of is not None: - query = query.filter( - models.AlarmChange.on_behalf_of == on_behalf_of) - if user is not None: - query = query.filter(models.AlarmChange.user_id == user) - if project is not None: - query = query.filter(models.AlarmChange.project_id == project) - if alarm_type is not None: - query = query.filter(models.AlarmChange.type == alarm_type) - if severity is not None: - query = query.filter(models.AlarmChange.severity == severity) - if start_timestamp: - if start_timestamp_op == 'gt': - query = query.filter( - models.AlarmChange.timestamp > start_timestamp) - else: - query = query.filter( - models.AlarmChange.timestamp >= start_timestamp) - if end_timestamp: - if end_timestamp_op == 'le': - query = query.filter( - models.AlarmChange.timestamp <= end_timestamp) - else: - query = query.filter( - models.AlarmChange.timestamp < end_timestamp) - - query = self._get_pagination_query( - session, query, pagination, alarm_api_models.AlarmChange, - models.AlarmChange) - return self._retrieve_alarm_history(query) - - def record_alarm_change(self, alarm_change): - """Record alarm change event.""" - session = self._engine_facade.get_session() - with session.begin(): - alarm_change_row = models.AlarmChange( - event_id=alarm_change['event_id']) - alarm_change_row.update(alarm_change) - session.add(alarm_change_row) - - def clear_expired_alarm_history_data(self, alarm_history_ttl): - """Clear expired alarm history data from the backend storage system. - - Clearing occurs according to the time-to-live. - - :param alarm_history_ttl: Number of seconds to keep alarm history - records for. - """ - session = self._engine_facade.get_session() - with session.begin(): - valid_start = (timeutils.utcnow() - - datetime.timedelta(seconds=alarm_history_ttl)) - deleted_rows = (session.query(models.AlarmChange) - .filter(models.AlarmChange.timestamp < valid_start) - .delete()) - LOG.info("%d alarm histories are removed from database", - deleted_rows) diff --git a/aodh/storage/models.py b/aodh/storage/models.py deleted file mode 100644 index 43358054..00000000 --- a/aodh/storage/models.py +++ /dev/null @@ -1,150 +0,0 @@ -# -# Copyright 2013 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Model classes for use in the storage API. 
-""" - -import datetime - -from aodh.i18n import _ -from aodh.storage import base - - -class Alarm(base.Model): - ALARM_INSUFFICIENT_DATA = 'insufficient data' - ALARM_OK = 'ok' - ALARM_ALARM = 'alarm' - - ALARM_ACTIONS_MAP = { - ALARM_INSUFFICIENT_DATA: 'insufficient_data_actions', - ALARM_OK: 'ok_actions', - ALARM_ALARM: 'alarm_actions', - } - - ALARM_LEVEL_LOW = 'low' - ALARM_LEVEL_MODERATE = 'moderate' - ALARM_LEVEL_CRITICAL = 'critical' - - SUPPORT_SORT_KEYS = ( - 'alarm_id', 'enabled', 'name', 'type', 'severity', 'timestamp', - 'user_id', 'project_id', 'state', 'repeat_actions', 'state_timestamp') - DEFAULT_SORT = [('timestamp', 'desc')] - PRIMARY_KEY = 'alarm_id' - - """ - An alarm to monitor. - - :param alarm_id: UUID of the alarm - :param type: type of the alarm - :param name: The Alarm name - :param description: User friendly description of the alarm - :param enabled: Is the alarm enabled - :param state: Alarm state (ok/alarm/insufficient data) - :param state_reason: Alarm state reason - :param rule: A rule that defines when the alarm fires - :param user_id: the owner/creator of the alarm - :param project_id: the project_id of the creator - :param evaluation_periods: the number of periods - :param period: the time period in seconds - :param time_constraints: the list of the alarm's time constraints, if any - :param timestamp: the timestamp when the alarm was last updated - :param state_timestamp: the timestamp of the last state change - :param ok_actions: the list of webhooks to call when entering the ok state - :param alarm_actions: the list of webhooks to call when entering the - alarm state - :param insufficient_data_actions: the list of webhooks to call when - entering the insufficient data state - :param repeat_actions: Is the actions should be triggered on each - alarm evaluation. - :param severity: Alarm level (low/moderate/critical) - """ - def __init__(self, alarm_id, type, enabled, name, description, - timestamp, user_id, project_id, state, state_timestamp, - state_reason, ok_actions, alarm_actions, - insufficient_data_actions, repeat_actions, rule, - time_constraints, severity=None): - if not isinstance(timestamp, datetime.datetime): - raise TypeError(_("timestamp should be datetime object")) - if not isinstance(state_timestamp, datetime.datetime): - raise TypeError(_("state_timestamp should be datetime object")) - base.Model.__init__( - self, - alarm_id=alarm_id, - type=type, - enabled=enabled, - name=name, - description=description, - timestamp=timestamp, - user_id=user_id, - project_id=project_id, - state=state, - state_timestamp=state_timestamp, - state_reason=state_reason, - ok_actions=ok_actions, - alarm_actions=alarm_actions, - insufficient_data_actions=insufficient_data_actions, - repeat_actions=repeat_actions, - rule=rule, - time_constraints=time_constraints, - severity=severity) - - -class AlarmChange(base.Model): - """Record of an alarm change. 
- - :param event_id: UUID of the change event - :param alarm_id: UUID of the alarm - :param type: The type of change - :param severity: The severity of alarm - :param detail: JSON fragment describing change - :param user_id: the user ID of the initiating identity - :param project_id: the project ID of the initiating identity - :param on_behalf_of: the tenant on behalf of which the change - is being made - :param timestamp: the timestamp of the change - """ - - CREATION = 'creation' - RULE_CHANGE = 'rule change' - STATE_TRANSITION = 'state transition' - DELETION = 'deletion' - - SUPPORT_SORT_KEYS = ( - 'event_id', 'alarm_id', 'on_behalf_of', 'project_id', 'user_id', - 'type', 'timestamp', 'severity') - DEFAULT_SORT = [('timestamp', 'desc')] - PRIMARY_KEY = 'event_id' - - def __init__(self, - event_id, - alarm_id, - type, - detail, - user_id, - project_id, - on_behalf_of, - severity=None, - timestamp=None - ): - base.Model.__init__( - self, - event_id=event_id, - alarm_id=alarm_id, - type=type, - severity=severity, - detail=detail, - user_id=user_id, - project_id=project_id, - on_behalf_of=on_behalf_of, - timestamp=timestamp) diff --git a/aodh/storage/sqlalchemy/__init__.py b/aodh/storage/sqlalchemy/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/aodh/storage/sqlalchemy/alembic/alembic.ini b/aodh/storage/sqlalchemy/alembic/alembic.ini deleted file mode 100644 index 57732fed..00000000 --- a/aodh/storage/sqlalchemy/alembic/alembic.ini +++ /dev/null @@ -1,37 +0,0 @@ -[alembic] -script_location = aodh.storage.sqlalchemy:alembic -sqlalchemy.url = - -[loggers] -keys = root,sqlalchemy,alembic - -[handlers] -keys = console - -[formatters] -keys = generic - -[logger_root] -level = WARN -handlers = console -qualname = - -[logger_sqlalchemy] -level = WARN -handlers = -qualname = sqlalchemy.engine - -[logger_alembic] -level = WARN -handlers = -qualname = alembic - -[handler_console] -class = StreamHandler -args = (sys.stderr,) -level = NOTSET -formatter = generic - -[formatter_generic] -format = %(levelname)-5.5s [%(name)s] %(message)s -datefmt = %H:%M:%S diff --git a/aodh/storage/sqlalchemy/alembic/env.py b/aodh/storage/sqlalchemy/alembic/env.py deleted file mode 100644 index fd15eb5f..00000000 --- a/aodh/storage/sqlalchemy/alembic/env.py +++ /dev/null @@ -1,92 +0,0 @@ -# -# Copyright 2015 Huawei Technologies Co., Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from __future__ import with_statement -from alembic import context -from logging.config import fileConfig - -from aodh.storage import impl_sqlalchemy -from aodh.storage.sqlalchemy import models - - -# this is the Alembic Config object, which provides -# access to the values within the .ini file in use. -config = context.config - -# Interpret the config file for Python logging. -# This line sets up loggers basically. 
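# NOTE: fileConfig() below feeds the [loggers]/[handlers]/[formatters]
# sections of the alembic.ini shown earlier into Python's standard
# logging.config machinery; outside of alembic the equivalent call would
# simply be fileConfig('alembic.ini') (path hypothetical).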
-fileConfig(config.config_file_name) - -# add your model's MetaData object here -# for 'autogenerate' support -# from myapp import mymodel -# target_metadata = mymodel.Base.metadata -target_metadata = models.Base.metadata - -# other values from the config, defined by the needs of env.py, -# can be acquired: -# my_important_option = config.get_main_option("my_important_option") -# ... etc. - - -def run_migrations_offline(): - """Run migrations in 'offline' mode. - - This configures the context with just a URL - and not an Engine, though an Engine is acceptable - here as well. By skipping the Engine creation - we don't even need a DBAPI to be available. - - Calls to context.execute() here emit the given string to the - script output. - - """ - conf = config.conf - context.configure(url=conf.database.connection, - target_metadata=target_metadata) - - with context.begin_transaction(): - context.run_migrations() - - -def run_migrations_online(): - """Run migrations in 'online' mode. - - In this scenario we need to create an Engine - and associate a connection with the context. - - """ - conf = config.conf - conn = impl_sqlalchemy.Connection(conf, conf.database.connection) - connectable = conn._engine_facade.get_engine() - - with connectable.connect() as connection: - context.configure( - connection=connection, - target_metadata=target_metadata - ) - - with context.begin_transaction(): - context.run_migrations() - conn.disconnect() - -if not hasattr(config, "conf"): - from aodh import service - config.conf = service.prepare_service([]) - -if context.is_offline_mode(): - run_migrations_offline() -else: - run_migrations_online() diff --git a/aodh/storage/sqlalchemy/alembic/script.py.mako b/aodh/storage/sqlalchemy/alembic/script.py.mako deleted file mode 100644 index e1e7e5d3..00000000 --- a/aodh/storage/sqlalchemy/alembic/script.py.mako +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright ${create_date.year} OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -"""${message} - -Revision ID: ${up_revision} -Revises: ${down_revision | comma,n} -Create Date: ${create_date} - -""" - -# revision identifiers, used by Alembic. -revision = ${repr(up_revision)} -down_revision = ${repr(down_revision)} -branch_labels = ${repr(branch_labels)} -depends_on = ${repr(depends_on)} - -from alembic import op -import sqlalchemy as sa -${imports if imports else ""} - -def upgrade(): - ${upgrades if upgrades else "pass"} diff --git a/aodh/storage/sqlalchemy/alembic/versions/12fe8fac9fe4_initial_base.py b/aodh/storage/sqlalchemy/alembic/versions/12fe8fac9fe4_initial_base.py deleted file mode 100644 index 7a2ecbfe..00000000 --- a/aodh/storage/sqlalchemy/alembic/versions/12fe8fac9fe4_initial_base.py +++ /dev/null @@ -1,107 +0,0 @@ -# Copyright 2015 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -"""initial base - -Revision ID: 12fe8fac9fe4 -Revises: -Create Date: 2015-07-28 17:38:37.022899 - -""" - -# revision identifiers, used by Alembic. -revision = '12fe8fac9fe4' -down_revision = None -branch_labels = None -depends_on = None - -from alembic import op -import sqlalchemy as sa -from sqlalchemy import types - -import aodh.storage.sqlalchemy.models - - -class PreciseTimestamp(types.TypeDecorator): - """Represents a timestamp precise to the microsecond.""" - - impl = sa.DateTime - - def load_dialect_impl(self, dialect): - if dialect.name == 'mysql': - return dialect.type_descriptor( - types.DECIMAL(precision=20, - scale=6, - asdecimal=True)) - return dialect.type_descriptor(self.impl) - - -def upgrade(): - op.create_table( - 'alarm_history', - sa.Column('event_id', sa.String(length=128), nullable=False), - sa.Column('alarm_id', sa.String(length=128), nullable=True), - sa.Column('on_behalf_of', sa.String(length=128), nullable=True), - sa.Column('project_id', sa.String(length=128), nullable=True), - sa.Column('user_id', sa.String(length=128), nullable=True), - sa.Column('type', sa.String(length=20), nullable=True), - sa.Column('detail', sa.Text(), nullable=True), - sa.Column('timestamp', - PreciseTimestamp(), - nullable=True), - sa.PrimaryKeyConstraint('event_id') - ) - op.create_index( - 'ix_alarm_history_alarm_id', 'alarm_history', ['alarm_id'], - unique=False) - op.create_table( - 'alarm', - sa.Column('alarm_id', sa.String(length=128), nullable=False), - sa.Column('enabled', sa.Boolean(), nullable=True), - sa.Column('name', sa.Text(), nullable=True), - sa.Column('type', sa.String(length=50), nullable=True), - sa.Column('severity', sa.String(length=50), nullable=True), - sa.Column('description', sa.Text(), nullable=True), - sa.Column('timestamp', - PreciseTimestamp(), - nullable=True), - sa.Column('user_id', sa.String(length=128), nullable=True), - sa.Column('project_id', sa.String(length=128), nullable=True), - sa.Column('state', sa.String(length=255), nullable=True), - sa.Column('state_timestamp', - PreciseTimestamp(), - nullable=True), - sa.Column('ok_actions', - aodh.storage.sqlalchemy.models.JSONEncodedDict(), - nullable=True), - sa.Column('alarm_actions', - aodh.storage.sqlalchemy.models.JSONEncodedDict(), - nullable=True), - sa.Column('insufficient_data_actions', - aodh.storage.sqlalchemy.models.JSONEncodedDict(), - nullable=True), - sa.Column('repeat_actions', sa.Boolean(), nullable=True), - sa.Column('rule', - aodh.storage.sqlalchemy.models.JSONEncodedDict(), - nullable=True), - sa.Column('time_constraints', - aodh.storage.sqlalchemy.models.JSONEncodedDict(), - nullable=True), - sa.PrimaryKeyConstraint('alarm_id') - ) - op.create_index( - 'ix_alarm_project_id', 'alarm', ['project_id'], unique=False) - op.create_index( - 'ix_alarm_user_id', 'alarm', ['user_id'], unique=False) diff --git a/aodh/storage/sqlalchemy/alembic/versions/367aadf5485f_precisetimestamp_to_datetime.py b/aodh/storage/sqlalchemy/alembic/versions/367aadf5485f_precisetimestamp_to_datetime.py deleted file mode 100644 index 7afb6656..00000000 --- 
a/aodh/storage/sqlalchemy/alembic/versions/367aadf5485f_precisetimestamp_to_datetime.py +++ /dev/null @@ -1,68 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright 2016 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -"""precisetimestamp_to_datetime - -Revision ID: 367aadf5485f -Revises: f8c31b1ffe11 -Create Date: 2016-09-19 16:43:34.379029 - -""" - -# revision identifiers, used by Alembic. -revision = '367aadf5485f' -down_revision = 'f8c31b1ffe11' -branch_labels = None -depends_on = None - -from alembic import op -import sqlalchemy as sa -from sqlalchemy import func - -from aodh.storage.sqlalchemy import models - - -def upgrade(): - bind = op.get_bind() - if bind and bind.engine.name == "mysql": - # NOTE(jd) So that crappy engine that is MySQL does not have "ALTER - # TABLE … USING …". We need to copy everything and convert… - for table_name, column_name in (("alarm", "timestamp"), - ("alarm", "state_timestamp"), - ("alarm_history", "timestamp")): - existing_type = sa.types.DECIMAL( - precision=20, scale=6, asdecimal=True) - existing_col = sa.Column( - column_name, - existing_type, - nullable=True) - temp_col = sa.Column( - column_name + "_ts", - models.TimestampUTC(), - nullable=True) - op.add_column(table_name, temp_col) - t = sa.sql.table(table_name, existing_col, temp_col) - op.execute(t.update().values( - **{column_name + "_ts": func.from_unixtime(existing_col)})) - op.drop_column(table_name, column_name) - op.alter_column(table_name, - column_name + "_ts", - nullable=True, - type_=models.TimestampUTC(), - existing_nullable=True, - existing_type=existing_type, - new_column_name=column_name) diff --git a/aodh/storage/sqlalchemy/alembic/versions/6ae0d05d9451_add_reason_column.py b/aodh/storage/sqlalchemy/alembic/versions/6ae0d05d9451_add_reason_column.py deleted file mode 100644 index e95873ac..00000000 --- a/aodh/storage/sqlalchemy/alembic/versions/6ae0d05d9451_add_reason_column.py +++ /dev/null @@ -1,37 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright 2017 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -"""add_reason_column - -Revision ID: 6ae0d05d9451 -Revises: 367aadf5485f -Create Date: 2017-06-05 16:42:42.379029 - -""" - -# revision identifiers, used by Alembic. 
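# NOTE: the preceding 367aadf5485f migration is the portable recipe for
# changing a column's type on MySQL, which lacks ALTER TABLE ... USING:
# add a temporary column of the target type, UPDATE it from the old
# column through a conversion expression (from_unixtime() above), drop
# the old column, then rename the temporary column into place with
# op.alter_column(..., new_column_name=...).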
-revision = '6ae0d05d9451' -down_revision = '367aadf5485f' -branch_labels = None -depends_on = None - -from alembic import op -import sqlalchemy as sa - - -def upgrade(): - op.add_column('alarm', sa.Column('state_reason', sa.Text, nullable=True)) diff --git a/aodh/storage/sqlalchemy/alembic/versions/bb07adac380_add_severity_to_alarm_history.py b/aodh/storage/sqlalchemy/alembic/versions/bb07adac380_add_severity_to_alarm_history.py deleted file mode 100644 index 756fb615..00000000 --- a/aodh/storage/sqlalchemy/alembic/versions/bb07adac380_add_severity_to_alarm_history.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright 2015 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -"""add severity to alarm history - -Revision ID: bb07adac380 -Revises: 12fe8fac9fe4 -Create Date: 2015-08-06 15:15:43.717068 - -""" - -# revision identifiers, used by Alembic. -revision = 'bb07adac380' -down_revision = '12fe8fac9fe4' -branch_labels = None -depends_on = None - -from alembic import op -import sqlalchemy as sa - - -def upgrade(): - op.add_column('alarm_history', - sa.Column('severity', sa.String(length=50), nullable=True)) diff --git a/aodh/storage/sqlalchemy/alembic/versions/f8c31b1ffe11_add_index_for_enabled_and_type.py b/aodh/storage/sqlalchemy/alembic/versions/f8c31b1ffe11_add_index_for_enabled_and_type.py deleted file mode 100644 index 793e029e..00000000 --- a/aodh/storage/sqlalchemy/alembic/versions/f8c31b1ffe11_add_index_for_enabled_and_type.py +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright 2016 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -"""add index for enabled and type - -Revision ID: f8c31b1ffe11 -Revises: bb07adac380 -Create Date: 2016-06-02 19:39:42.495020 - -""" - -# revision identifiers, used by Alembic. -revision = 'f8c31b1ffe11' -down_revision = 'bb07adac380' -branch_labels = None -depends_on = None - -from alembic import op - - -def upgrade(): - op.create_index( - 'ix_alarm_enabled', 'alarm', ['enabled'], unique=False) - op.create_index( - 'ix_alarm_type', 'alarm', ['type'], unique=False) diff --git a/aodh/storage/sqlalchemy/models.py b/aodh/storage/sqlalchemy/models.py deleted file mode 100644 index f8d5eb2b..00000000 --- a/aodh/storage/sqlalchemy/models.py +++ /dev/null @@ -1,124 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -SQLAlchemy models for aodh data. -""" -import json - -from oslo_utils import timeutils -import six -from sqlalchemy import Column, String, Index, Boolean, Text, DateTime -from sqlalchemy.dialects import mysql -from sqlalchemy.ext.declarative import declarative_base -from sqlalchemy.types import TypeDecorator - - -class JSONEncodedDict(TypeDecorator): - """Represents an immutable structure as a json-encoded string.""" - - impl = Text - - @staticmethod - def process_bind_param(value, dialect): - if value is not None: - value = json.dumps(value) - return value - - @staticmethod - def process_result_value(value, dialect): - if value is not None: - value = json.loads(value) - return value - - -class TimestampUTC(TypeDecorator): - """Represents a timestamp precise to the microsecond.""" - - impl = DateTime - - def load_dialect_impl(self, dialect): - if dialect.name == 'mysql': - return dialect.type_descriptor(mysql.DATETIME(fsp=6)) - return self.impl - - -class AodhBase(object): - """Base class for Aodh Models.""" - __table_args__ = {'mysql_charset': "utf8", - 'mysql_engine': "InnoDB"} - __table_initialized__ = False - - def __setitem__(self, key, value): - setattr(self, key, value) - - def __getitem__(self, key): - return getattr(self, key) - - def update(self, values): - """Make the model object behave like a dict.""" - for k, v in six.iteritems(values): - setattr(self, k, v) - - -Base = declarative_base(cls=AodhBase) - - -class Alarm(Base): - """Define Alarm data.""" - __tablename__ = 'alarm' - __table_args__ = ( - Index('ix_alarm_user_id', 'user_id'), - Index('ix_alarm_project_id', 'project_id'), - Index('ix_alarm_enabled', 'enabled'), - Index('ix_alarm_type', 'type'), - ) - alarm_id = Column(String(128), primary_key=True) - enabled = Column(Boolean) - name = Column(Text) - type = Column(String(50)) - severity = Column(String(50)) - description = Column(Text) - timestamp = Column(TimestampUTC, default=lambda: timeutils.utcnow()) - - user_id = Column(String(128)) - project_id = Column(String(128)) - - state = Column(String(255)) - state_reason = Column(Text) - state_timestamp = Column(TimestampUTC, - default=lambda: timeutils.utcnow()) - - ok_actions = Column(JSONEncodedDict) - alarm_actions = Column(JSONEncodedDict) - insufficient_data_actions = Column(JSONEncodedDict) - repeat_actions = Column(Boolean) - - rule = Column(JSONEncodedDict) - time_constraints = Column(JSONEncodedDict) - - -class AlarmChange(Base): - """Define AlarmChange data.""" - __tablename__ = 'alarm_history' - __table_args__ = ( - Index('ix_alarm_history_alarm_id', 'alarm_id'), - ) - event_id = Column(String(128), primary_key=True) - alarm_id = Column(String(128)) - on_behalf_of = Column(String(128)) - project_id = Column(String(128)) - user_id = Column(String(128)) - type = Column(String(20)) - detail = Column(Text) - timestamp = Column(TimestampUTC, default=lambda: timeutils.utcnow()) - severity = Column(String(50)) diff --git a/aodh/storage/sqlalchemy/utils.py b/aodh/storage/sqlalchemy/utils.py deleted file mode 100644 index d32dc149..00000000 --- a/aodh/storage/sqlalchemy/utils.py +++ /dev/null @@ 
-1,103 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import operator - -from sqlalchemy import and_ -from sqlalchemy import asc -from sqlalchemy import desc -from sqlalchemy import func -from sqlalchemy import not_ -from sqlalchemy import or_ - - -class QueryTransformer(object): - operators = {"=": operator.eq, - "<": operator.lt, - ">": operator.gt, - "<=": operator.le, - "=<": operator.le, - ">=": operator.ge, - "=>": operator.ge, - "!=": operator.ne, - "in": lambda field_name, values: field_name.in_(values), - "=~": lambda field, value: field.op("regexp")(value)} - - # operators which are different for different dialects - dialect_operators = {'postgresql': {'=~': (lambda field, value: - field.op("~")(value))}} - - complex_operators = {"or": or_, - "and": and_, - "not": not_} - - ordering_functions = {"asc": asc, - "desc": desc} - - def __init__(self, table, query, dialect='mysql'): - self.table = table - self.query = query - self.dialect_name = dialect - - def _get_operator(self, op): - return (self.dialect_operators.get(self.dialect_name, {}).get(op) - or self.operators[op]) - - def _handle_complex_op(self, complex_op, nodes): - op = self.complex_operators[complex_op] - if op == not_: - nodes = [nodes] - element_list = [] - for node in nodes: - element = self._transform(node) - element_list.append(element) - return op(*element_list) - - def _handle_simple_op(self, simple_op, nodes): - op = self._get_operator(simple_op) - field_name, value = list(nodes.items())[0] - return op(getattr(self.table, field_name), value) - - def _transform(self, sub_tree): - operator, nodes = list(sub_tree.items())[0] - if operator in self.complex_operators: - return self._handle_complex_op(operator, nodes) - else: - return self._handle_simple_op(operator, nodes) - - def apply_filter(self, expression_tree): - condition = self._transform(expression_tree) - self.query = self.query.filter(condition) - - def apply_options(self, orderby, limit): - self._apply_order_by(orderby) - if limit is not None: - self.query = self.query.limit(limit) - - def _apply_order_by(self, orderby): - if orderby is not None: - for field in orderby: - attr, order = list(field.items())[0] - ordering_function = self.ordering_functions[order] - if attr == 'severity': - self.query = self.query.order_by(ordering_function( - func.field(getattr(self.table, attr), 'low', - 'moderate', 'critical'))) - else: - self.query = self.query.order_by(ordering_function( - getattr(self.table, attr))) - else: - self.query = self.query.order_by(desc(self.table.timestamp)) - - def get_query(self): - return self.query diff --git a/aodh/tests/__init__.py b/aodh/tests/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/aodh/tests/base.py b/aodh/tests/base.py deleted file mode 100644 index 2a4942e0..00000000 --- a/aodh/tests/base.py +++ /dev/null @@ -1,108 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2012 New Dream Network (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this 
file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Test base classes. -""" -import fixtures -import functools -import os.path -import unittest - -import oslo_messaging.conffixture -from oslo_utils import timeutils -from oslotest import base -import six -import webtest - -import aodh -from aodh import messaging - - -class BaseTestCase(base.BaseTestCase): - def setup_messaging(self, conf, exchange=None): - self.useFixture(oslo_messaging.conffixture.ConfFixture(conf)) - conf.set_override("notification_driver", ["messaging"]) - if not exchange: - exchange = 'aodh' - conf.set_override("control_exchange", exchange) - - # NOTE(sileht): Ensure a new oslo.messaging driver is loaded - # between each tests - self.transport = messaging.get_transport(conf, "fake://", cache=False) - self.useFixture(fixtures.MockPatch( - 'aodh.messaging.get_transport', - return_value=self.transport)) - - def assertTimestampEqual(self, first, second, msg=None): - """Checks that two timestamps are equals. - - This relies on assertAlmostEqual to avoid rounding problem, and only - checks up the first microsecond values. - - """ - return self.assertAlmostEqual( - timeutils.delta_seconds(first, second), - 0.0, - places=5) - - def assertIsEmpty(self, obj): - try: - if len(obj) != 0: - self.fail("%s is not empty" % type(obj)) - except (TypeError, AttributeError): - self.fail("%s doesn't have length" % type(obj)) - - def assertIsNotEmpty(self, obj): - try: - if len(obj) == 0: - self.fail("%s is empty" % type(obj)) - except (TypeError, AttributeError): - self.fail("%s doesn't have length" % type(obj)) - - @staticmethod - def path_get(project_file=None): - root = os.path.abspath(os.path.join(os.path.dirname(__file__), - '..', - '..', - ) - ) - if project_file: - return os.path.join(root, project_file) - else: - return root - - -def _skip_decorator(func): - @functools.wraps(func) - def skip_if_not_implemented(*args, **kwargs): - try: - return func(*args, **kwargs) - except aodh.NotImplementedError as e: - raise unittest.SkipTest(six.text_type(e)) - except webtest.app.AppError as e: - if 'not implemented' in six.text_type(e): - raise unittest.SkipTest(six.text_type(e)) - raise - return skip_if_not_implemented - - -class SkipNotImplementedMeta(type): - def __new__(cls, name, bases, local): - for attr in local: - value = local[attr] - if callable(value) and ( - attr.startswith('test_') or attr == 'setUp'): - local[attr] = _skip_decorator(value) - return type.__new__(cls, name, bases, local) diff --git a/aodh/tests/constants.py b/aodh/tests/constants.py deleted file mode 100644 index 50678fc9..00000000 --- a/aodh/tests/constants.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright 2014 Huawei Technologies Co., Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime - -MIN_DATETIME = datetime.datetime(datetime.MINYEAR, 1, 1) diff --git a/aodh/tests/functional/__init__.py b/aodh/tests/functional/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/aodh/tests/functional/api/__init__.py b/aodh/tests/functional/api/__init__.py deleted file mode 100644 index 184a00cb..00000000 --- a/aodh/tests/functional/api/__init__.py +++ /dev/null @@ -1,155 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# Copyright 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Base classes for API tests. -""" - -from oslo_config import fixture as fixture_config -import webtest - -from aodh.api import app -from aodh import service -from aodh.tests.functional import db as db_test_base - - -class FunctionalTest(db_test_base.TestBase): - """Used for functional tests of Pecan controllers. - - Used in case when you need to test your literal application and its - integration with the framework. - """ - - PATH_PREFIX = '' - - def setUp(self): - super(FunctionalTest, self).setUp() - conf = service.prepare_service(argv=[], config_files=[]) - self.CONF = self.useFixture(fixture_config.Config(conf)).conf - self.setup_messaging(self.CONF) - - self.CONF.set_override('auth_mode', None, group='api') - self.app = webtest.TestApp(app.load_app(self.CONF)) - - def put_json(self, path, params, expect_errors=False, headers=None, - extra_environ=None, status=None): - """Sends simulated HTTP PUT request to Pecan test app. - - :param path: url path of target service - :param params: content for wsgi.input of request - :param expect_errors: boolean value whether an error is expected based - on request - :param headers: A dictionary of headers to send along with the request - :param extra_environ: A dictionary of environ variables to send along - with the request - :param status: Expected status code of response - """ - return self.post_json(path=path, params=params, - expect_errors=expect_errors, - headers=headers, extra_environ=extra_environ, - status=status, method="put") - - def post_json(self, path, params, expect_errors=False, headers=None, - method="post", extra_environ=None, status=None): - """Sends simulated HTTP POST request to Pecan test app. - - :param path: url path of target service - :param params: content for wsgi.input of request - :param expect_errors: boolean value whether an error is expected based - on request - :param headers: A dictionary of headers to send along with the request - :param method: Request method type. Appropriate method function call - should be used rather than passing attribute in. 
- :param extra_environ: A dictionary of environ variables to send along - with the request - :param status: Expected status code of response - """ - full_path = self.PATH_PREFIX + path - response = getattr(self.app, "%s_json" % method)( - str(full_path), - params=params, - headers=headers, - status=status, - extra_environ=extra_environ, - expect_errors=expect_errors - ) - return response - - def delete(self, path, expect_errors=False, headers=None, - extra_environ=None, status=None): - """Sends simulated HTTP DELETE request to Pecan test app. - - :param path: url path of target service - :param expect_errors: boolean value whether an error is expected based - on request - :param headers: A dictionary of headers to send along with the request - :param extra_environ: A dictionary of environ variables to send along - with the request - :param status: Expected status code of response - """ - full_path = self.PATH_PREFIX + path - response = self.app.delete(str(full_path), - headers=headers, - status=status, - extra_environ=extra_environ, - expect_errors=expect_errors) - return response - - def get_json(self, path, expect_errors=False, headers=None, - extra_environ=None, q=None, groupby=None, status=None, - override_params=None, **params): - """Sends simulated HTTP GET request to Pecan test app. - - :param path: url path of target service - :param expect_errors: boolean value whether an error is expected based - on request - :param headers: A dictionary of headers to send along with the request - :param extra_environ: A dictionary of environ variables to send along - with the request - :param q: list of queries consisting of: field, value, op, and type - keys - :param groupby: list of fields to group by - :param status: Expected status code of response - :param override_params: literally encoded query param string - :param params: content for wsgi.input of request - """ - q = q or [] - groupby = groupby or [] - full_path = self.PATH_PREFIX + path - if override_params: - all_params = override_params - else: - query_params = {'q.field': [], - 'q.value': [], - 'q.op': [], - 'q.type': [], - } - for query in q: - for name in ['field', 'op', 'value', 'type']: - query_params['q.%s' % name].append(query.get(name, '')) - all_params = {} - all_params.update(params) - if q: - all_params.update(query_params) - if groupby: - all_params.update({'groupby': groupby}) - response = self.app.get(full_path, - params=all_params, - headers=headers, - extra_environ=extra_environ, - expect_errors=expect_errors, - status=status) - if not expect_errors: - response = response.json - return response diff --git a/aodh/tests/functional/api/test_app.py b/aodh/tests/functional/api/test_app.py deleted file mode 100644 index cdf12cf8..00000000 --- a/aodh/tests/functional/api/test_app.py +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright 2014 IBM Corp. All Rights Reserved. -# Copyright 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
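# NOTE: the q handling in get_json() above flattens each query dict into
# parallel q.field/q.op/q.value/q.type lists before the request is made.
# A small self-contained illustration of that encoding (plain Python, no
# webtest needed; flatten_queries is a hypothetical helper):

def flatten_queries(q):
    # One entry per query in each parallel q.* list, with '' used for
    # any key a query omits, mirroring get_json() above.
    params = {'q.field': [], 'q.value': [], 'q.op': [], 'q.type': []}
    for query in q:
        for name in ('field', 'op', 'value', 'type'):
            params['q.%s' % name].append(query.get(name, ''))
    return params

print(flatten_queries([{'field': 'state', 'op': 'eq', 'value': 'ok'}]))
# {'q.field': ['state'], 'q.value': ['ok'], 'q.op': ['eq'], 'q.type': ['']}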
- -import mock -from oslo_config import cfg -from oslo_config import fixture as fixture_config - -from aodh.api import app -from aodh import service -from aodh.tests import base - - -class TestApp(base.BaseTestCase): - - def setUp(self): - super(TestApp, self).setUp() - conf = service.prepare_service(argv=[], config_files=[]) - self.CONF = self.useFixture(fixture_config.Config(conf)).conf - - def test_api_paste_file_not_exist(self): - self.CONF.set_override('paste_config', 'non-existent-file', "api") - with mock.patch.object(self.CONF, 'find_file') as ff: - ff.return_value = None - self.assertRaises(cfg.ConfigFilesNotFoundError, - app.load_app, self.CONF) diff --git a/aodh/tests/functional/api/test_versions.py b/aodh/tests/functional/api/test_versions.py deleted file mode 100644 index 5dd8c251..00000000 --- a/aodh/tests/functional/api/test_versions.py +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright 2014 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from aodh.tests.functional import api - -V2_MEDIA_TYPES = [ - { - 'base': 'application/json', - 'type': 'application/vnd.openstack.telemetry-v2+json' - }, { - 'base': 'application/xml', - 'type': 'application/vnd.openstack.telemetry-v2+xml' - } -] - -V2_HTML_DESCRIPTION = { - 'href': 'http://docs.openstack.org/', - 'rel': 'describedby', - 'type': 'text/html', -} - -V2_EXPECTED_RESPONSE = { - 'id': 'v2', - 'links': [ - { - 'rel': 'self', - 'href': 'http://localhost/v2', - }, - V2_HTML_DESCRIPTION - ], - 'media-types': V2_MEDIA_TYPES, - 'status': 'stable', - 'updated': '2013-02-13T00:00:00Z', -} - -V2_VERSION_RESPONSE = { - "version": V2_EXPECTED_RESPONSE -} - -VERSIONS_RESPONSE = { - "versions": { - "values": [ - V2_EXPECTED_RESPONSE - ] - } -} - - -class TestVersions(api.FunctionalTest): - - def test_versions(self): - data = self.get_json('/') - self.assertEqual(VERSIONS_RESPONSE, data) diff --git a/aodh/tests/functional/api/v2/__init__.py b/aodh/tests/functional/api/v2/__init__.py deleted file mode 100644 index 7f9e7354..00000000 --- a/aodh/tests/functional/api/v2/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
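# NOTE: test_versions() above pins the exact version-discovery document
# served at the API root. Probing a live deployment by hand would look
# roughly like this (host and port are assumptions; 8042 is the
# conventional aodh API port):

import json

from six.moves.urllib import request

resp = request.urlopen('http://localhost:8042/')
versions = json.loads(resp.read().decode('utf-8'))
print(versions['versions']['values'][0]['id'])  # expected: 'v2'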
- -from aodh.tests.functional import api - - -class FunctionalTest(api.FunctionalTest): - PATH_PREFIX = '/v2' diff --git a/aodh/tests/functional/api/v2/policy.json-test b/aodh/tests/functional/api/v2/policy.json-test deleted file mode 100644 index 58a01a7a..00000000 --- a/aodh/tests/functional/api/v2/policy.json-test +++ /dev/null @@ -1,7 +0,0 @@ -{ - "context_is_admin": "role:admin", - "segregation": "rule:context_is_admin", - "admin_or_owner": "rule:context_is_admin or project_id:%(project_id)s", - "default": "rule:admin_or_owner", - "telemetry:get_alarms": "role:admin" -} diff --git a/aodh/tests/functional/api/v2/test_alarm_scenarios.py b/aodh/tests/functional/api/v2/test_alarm_scenarios.py deleted file mode 100644 index 5699fb9c..00000000 --- a/aodh/tests/functional/api/v2/test_alarm_scenarios.py +++ /dev/null @@ -1,3062 +0,0 @@ -# -# Copyright 2013 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Tests alarm operation.""" - -import datetime -import os - -import mock -from oslo_serialization import jsonutils -from oslo_utils import uuidutils -import six -from six import moves -import webtest - -from aodh.api import app -from aodh import messaging -from aodh.storage import models -from aodh.tests import constants -from aodh.tests.functional.api import v2 - - -def default_alarms(auth_headers): - return [models.Alarm(name='name1', - type='threshold', - enabled=True, - alarm_id='a', - description='a', - state='insufficient data', - state_reason='Not evaluated', - severity='critical', - state_timestamp=constants.MIN_DATETIME, - timestamp=constants.MIN_DATETIME, - ok_actions=[], - insufficient_data_actions=[], - alarm_actions=[], - repeat_actions=True, - user_id=auth_headers['X-User-Id'], - project_id=auth_headers['X-Project-Id'], - time_constraints=[dict(name='testcons', - start='0 11 * * *', - duration=300)], - rule=dict(comparison_operator='gt', - threshold=2.0, - statistic='avg', - evaluation_periods=60, - period=1, - meter_name='meter.test', - query=[{'field': 'project_id', - 'op': 'eq', 'value': - auth_headers['X-Project-Id']} - ]), - ), - models.Alarm(name='name2', - type='threshold', - enabled=True, - alarm_id='b', - description='b', - state='insufficient data', - state_reason='Not evaluated', - severity='critical', - state_timestamp=constants.MIN_DATETIME, - timestamp=constants.MIN_DATETIME, - ok_actions=[], - insufficient_data_actions=[], - alarm_actions=[], - repeat_actions=False, - user_id=auth_headers['X-User-Id'], - project_id=auth_headers['X-Project-Id'], - time_constraints=[], - rule=dict(comparison_operator='gt', - threshold=4.0, - statistic='avg', - evaluation_periods=60, - period=1, - meter_name='meter.test', - query=[{'field': 'project_id', - 'op': 'eq', 'value': - auth_headers['X-Project-Id']} - ]), - ), - models.Alarm(name='name3', - type='threshold', - enabled=True, - alarm_id='c', - description='c', - state='insufficient data', - state_reason='Not evaluated', - severity='moderate', - state_timestamp=constants.MIN_DATETIME, - timestamp=constants.MIN_DATETIME, - 
ok_actions=[], - insufficient_data_actions=[], - alarm_actions=[], - repeat_actions=False, - user_id=auth_headers['X-User-Id'], - project_id=auth_headers['X-Project-Id'], - time_constraints=[], - rule=dict(comparison_operator='gt', - threshold=3.0, - statistic='avg', - evaluation_periods=60, - period=1, - meter_name='meter.mine', - query=[{'field': 'project_id', - 'op': 'eq', 'value': - auth_headers['X-Project-Id']} - ]), - )] - - -class TestAlarmsBase(v2.FunctionalTest): - - def setUp(self): - super(TestAlarmsBase, self).setUp() - self.auth_headers = {'X-User-Id': uuidutils.generate_uuid(), - 'X-Project-Id': uuidutils.generate_uuid()} - - @staticmethod - def _add_default_threshold_rule(alarm): - if (alarm['type'] == 'threshold' and - 'exclude_outliers' not in alarm['threshold_rule']): - alarm['threshold_rule']['exclude_outliers'] = False - - def _verify_alarm(self, json, alarm, expected_name=None): - if expected_name and alarm.name != expected_name: - self.fail("Alarm not found") - self._add_default_threshold_rule(json) - for key in json: - if key.endswith('_rule'): - storage_key = 'rule' - else: - storage_key = key - self.assertEqual(json[key], getattr(alarm, storage_key)) - - def _get_alarm(self, id, auth_headers=None): - data = self.get_json('/alarms', - headers=auth_headers or self.auth_headers) - match = [a for a in data if a['alarm_id'] == id] - self.assertEqual(1, len(match), 'alarm %s not found' % id) - return match[0] - - def _update_alarm(self, id, updated_data, auth_headers=None): - data = self._get_alarm(id, auth_headers) - data.update(updated_data) - self.put_json('/alarms/%s' % id, - params=data, - headers=auth_headers or self.auth_headers) - - def _delete_alarm(self, id, auth_headers=None): - self.delete('/alarms/%s' % id, - headers=auth_headers or self.auth_headers, - status=204) - - -class TestListEmptyAlarms(TestAlarmsBase): - - def test_empty(self): - data = self.get_json('/alarms', headers=self.auth_headers) - self.assertEqual([], data) - - -class TestAlarms(TestAlarmsBase): - - def setUp(self): - super(TestAlarms, self).setUp() - for alarm in default_alarms(self.auth_headers): - self.alarm_conn.create_alarm(alarm) - - def test_list_alarms(self): - data = self.get_json('/alarms', headers=self.auth_headers) - self.assertEqual(3, len(data)) - self.assertEqual(set(['name1', 'name2', 'name3']), - set(r['name'] for r in data)) - self.assertEqual(set(['meter.test', 'meter.mine']), - set(r['threshold_rule']['meter_name'] - for r in data if 'threshold_rule' in r)) - - def test_alarms_query_with_timestamp(self): - date_time = datetime.datetime(2012, 7, 2, 10, 41) - isotime = date_time.isoformat() - resp = self.get_json('/alarms', - headers=self.auth_headers, - q=[{'field': 'timestamp', - 'op': 'gt', - 'value': isotime}], - expect_errors=True) - self.assertEqual(resp.status_code, 400) - self.assertEqual(jsonutils.loads(resp.body)['error_message'] - ['faultstring'], - 'Unknown argument: "timestamp": ' - 'not valid for this resource') - - def test_alarms_query_with_meter(self): - resp = self.get_json('/alarms', - headers=self.auth_headers, - q=[{'field': 'meter', - 'op': 'eq', - 'value': 'meter.mine'}], - ) - self.assertEqual(1, len(resp)) - self.assertEqual('c', - resp[0]['alarm_id']) - self.assertEqual('meter.mine', - resp[0] - ['threshold_rule'] - ['meter_name']) - - def test_alarms_query_with_state(self): - alarm = models.Alarm(name='disabled', - type='threshold', - enabled=False, - alarm_id='c', - description='c', - state='ok', - state_reason='Not evaluated', - 
state_timestamp=constants.MIN_DATETIME, - timestamp=constants.MIN_DATETIME, - ok_actions=[], - insufficient_data_actions=[], - alarm_actions=[], - repeat_actions=False, - user_id=self.auth_headers['X-User-Id'], - project_id=self.auth_headers['X-Project-Id'], - time_constraints=[], - rule=dict(comparison_operator='gt', - threshold=3.0, - statistic='avg', - evaluation_periods=60, - period=1, - meter_name='meter.mine', - query=[ - {'field': 'project_id', - 'op': 'eq', 'value': - self.auth_headers['X-Project-Id']} - ]), - severity='critical') - self.alarm_conn.update_alarm(alarm) - resp = self.get_json('/alarms', - headers=self.auth_headers, - q=[{'field': 'state', - 'op': 'eq', - 'value': 'ok'}], - ) - self.assertEqual(1, len(resp)) - self.assertEqual('ok', resp[0]['state']) - - def test_list_alarms_by_type(self): - alarms = self.get_json('/alarms', - headers=self.auth_headers, - q=[{'field': 'type', - 'op': 'eq', - 'value': 'threshold'}]) - self.assertEqual(3, len(alarms)) - self.assertEqual(set(['threshold']), - set(alarm['type'] for alarm in alarms)) - - def test_get_not_existing_alarm(self): - resp = self.get_json('/alarms/alarm-id-3', - headers=self.auth_headers, - expect_errors=True) - self.assertEqual(404, resp.status_code) - self.assertEqual('Alarm alarm-id-3 not found in project %s' % - self.auth_headers["X-Project-Id"], - jsonutils.loads(resp.body)['error_message'] - ['faultstring']) - - def test_get_alarm(self): - alarms = self.get_json('/alarms', - headers=self.auth_headers, - q=[{'field': 'name', - 'value': 'name1', - }]) - self.assertEqual('name1', alarms[0]['name']) - self.assertEqual('meter.test', - alarms[0]['threshold_rule']['meter_name']) - - one = self.get_json('/alarms/%s' % alarms[0]['alarm_id'], - headers=self.auth_headers) - self.assertEqual('name1', one['name']) - self.assertEqual('meter.test', one['threshold_rule']['meter_name']) - self.assertEqual(alarms[0]['alarm_id'], one['alarm_id']) - self.assertEqual(alarms[0]['repeat_actions'], one['repeat_actions']) - self.assertEqual(alarms[0]['time_constraints'], - one['time_constraints']) - - def test_get_alarm_disabled(self): - alarm = models.Alarm(name='disabled', - type='threshold', - enabled=False, - alarm_id='c', - description='c', - state='insufficient data', - state_reason='Not evaluated', - state_timestamp=constants.MIN_DATETIME, - timestamp=constants.MIN_DATETIME, - ok_actions=[], - insufficient_data_actions=[], - alarm_actions=[], - repeat_actions=False, - user_id=self.auth_headers['X-User-Id'], - project_id=self.auth_headers['X-Project-Id'], - time_constraints=[], - rule=dict(comparison_operator='gt', - threshold=3.0, - statistic='avg', - evaluation_periods=60, - period=1, - meter_name='meter.mine', - query=[ - {'field': 'project_id', - 'op': 'eq', 'value': - self.auth_headers['X-Project-Id']} - ]), - severity='critical') - self.alarm_conn.update_alarm(alarm) - - alarms = self.get_json('/alarms', - headers=self.auth_headers, - q=[{'field': 'enabled', - 'value': 'False'}]) - self.assertEqual(1, len(alarms)) - self.assertEqual('disabled', alarms[0]['name']) - - one = self.get_json('/alarms/%s' % alarms[0]['alarm_id'], - headers=self.auth_headers) - self.assertEqual('disabled', one['name']) - - def test_get_alarm_project_filter_wrong_op_normal_user(self): - project = self.auth_headers['X-Project-Id'] - - def _test(field, op): - response = self.get_json('/alarms', - q=[{'field': field, - 'op': op, - 'value': project}], - expect_errors=True, - status=400, - headers=self.auth_headers) - faultstring = ('Invalid input 
for field/attribute op. ' - 'Value: \'%(op)s\'. unimplemented operator ' - 'for %(field)s' % {'field': field, 'op': op}) - self.assertEqual(faultstring, - response.json['error_message']['faultstring']) - - _test('project', 'ne') - _test('project_id', 'ne') - - def test_get_alarm_project_filter_normal_user(self): - project = self.auth_headers['X-Project-Id'] - - def _test(field): - alarms = self.get_json('/alarms', - headers=self.auth_headers, - q=[{'field': field, - 'op': 'eq', - 'value': project}]) - self.assertEqual(3, len(alarms)) - - _test('project') - _test('project_id') - - def test_get_alarm_other_project_normal_user(self): - def _test(field): - response = self.get_json('/alarms', - q=[{'field': field, - 'op': 'eq', - 'value': 'other-project'}], - expect_errors=True, - status=401, - headers=self.auth_headers) - faultstring = 'Not Authorized to access project other-project' - self.assertEqual(faultstring, - response.json['error_message']['faultstring']) - - _test('project') - _test('project_id') - - def test_get_alarm_forbiden(self): - pf = os.path.abspath('aodh/tests/functional/api/v2/policy.json-test') - self.CONF.set_override('policy_file', pf, group='oslo_policy') - self.CONF.set_override('auth_mode', None, group='api') - self.app = webtest.TestApp(app.load_app(self.CONF)) - - response = self.get_json('/alarms', - expect_errors=True, - status=403, - headers=self.auth_headers) - faultstring = 'RBAC Authorization Failed' - self.assertEqual(403, response.status_code) - self.assertEqual(faultstring, - response.json['error_message']['faultstring']) - - def test_post_alarm_wsme_workaround(self): - jsons = { - 'type': { - 'name': 'missing type', - 'threshold_rule': { - 'meter_name': 'ameter', - 'threshold': 2.0, - } - }, - 'name': { - 'type': 'threshold', - 'threshold_rule': { - 'meter_name': 'ameter', - 'threshold': 2.0, - } - }, - 'threshold_rule/meter_name': { - 'name': 'missing meter_name', - 'type': 'threshold', - 'threshold_rule': { - 'threshold': 2.0, - } - }, - 'threshold_rule/threshold': { - 'name': 'missing threshold', - 'type': 'threshold', - 'threshold_rule': { - 'meter_name': 'ameter', - } - }, - } - for field, json in six.iteritems(jsons): - resp = self.post_json('/alarms', params=json, expect_errors=True, - status=400, headers=self.auth_headers) - self.assertEqual("Invalid input for field/attribute %s." - " Value: \'None\'. Mandatory field missing." 
- % field.split('/', 1)[-1], - resp.json['error_message']['faultstring']) - alarms = list(self.alarm_conn.get_alarms()) - self.assertEqual(3, len(alarms)) - - def test_post_invalid_alarm_time_constraint_start(self): - json = { - 'name': 'added_alarm_invalid_constraint_duration', - 'type': 'threshold', - 'time_constraints': [ - { - 'name': 'testcons', - 'start': '11:00am', - 'duration': 10 - } - ], - 'threshold_rule': { - 'meter_name': 'ameter', - 'threshold': 300.0 - } - } - self.post_json('/alarms', params=json, expect_errors=True, status=400, - headers=self.auth_headers) - alarms = list(self.alarm_conn.get_alarms()) - self.assertEqual(3, len(alarms)) - - def test_post_duplicate_time_constraint_name(self): - json = { - 'name': 'added_alarm_duplicate_constraint_name', - 'type': 'threshold', - 'time_constraints': [ - { - 'name': 'testcons', - 'start': '* 11 * * *', - 'duration': 10 - }, - { - 'name': 'testcons', - 'start': '* * * * *', - 'duration': 20 - } - ], - 'threshold_rule': { - 'meter_name': 'ameter', - 'threshold': 300.0 - } - } - resp = self.post_json('/alarms', params=json, expect_errors=True, - status=400, headers=self.auth_headers) - self.assertEqual( - "Time constraint names must be unique for a given alarm.", - resp.json['error_message']['faultstring']) - alarms = list(self.alarm_conn.get_alarms()) - self.assertEqual(3, len(alarms)) - - def test_post_alarm_null_time_constraint(self): - json = { - 'name': 'added_alarm_invalid_constraint_duration', - 'type': 'threshold', - 'time_constraints': None, - 'threshold_rule': { - 'meter_name': 'ameter', - 'threshold': 300.0 - } - } - self.post_json('/alarms', params=json, status=201, - headers=self.auth_headers) - - def test_post_invalid_alarm_time_constraint_duration(self): - json = { - 'name': 'added_alarm_invalid_constraint_duration', - 'type': 'threshold', - 'time_constraints': [ - { - 'name': 'testcons', - 'start': '* 11 * * *', - 'duration': -1, - } - ], - 'threshold_rule': { - 'meter_name': 'ameter', - 'threshold': 300.0 - } - } - self.post_json('/alarms', params=json, expect_errors=True, status=400, - headers=self.auth_headers) - alarms = list(self.alarm_conn.get_alarms()) - self.assertEqual(3, len(alarms)) - - def test_post_invalid_alarm_time_constraint_timezone(self): - json = { - 'name': 'added_alarm_invalid_constraint_timezone', - 'type': 'threshold', - 'time_constraints': [ - { - 'name': 'testcons', - 'start': '* 11 * * *', - 'duration': 10, - 'timezone': 'aaaa' - } - ], - 'threshold_rule': { - 'meter_name': 'ameter', - 'threshold': 300.0 - } - } - self.post_json('/alarms', params=json, expect_errors=True, status=400, - headers=self.auth_headers) - alarms = list(self.alarm_conn.get_alarms()) - self.assertEqual(3, len(alarms)) - - def test_post_invalid_alarm_period(self): - json = { - 'name': 'added_alarm_invalid_period', - 'type': 'threshold', - 'threshold_rule': { - 'meter_name': 'ameter', - 'comparison_operator': 'gt', - 'threshold': 2.0, - 'statistic': 'avg', - 'period': -1, - } - - } - self.post_json('/alarms', params=json, expect_errors=True, status=400, - headers=self.auth_headers) - alarms = list(self.alarm_conn.get_alarms()) - self.assertEqual(3, len(alarms)) - - def test_post_null_rule(self): - json = { - 'name': 'added_alarm_invalid_threshold_rule', - 'type': 'threshold', - 'threshold_rule': None, - } - resp = self.post_json('/alarms', params=json, expect_errors=True, - status=400, headers=self.auth_headers) - self.assertEqual( - "threshold_rule must be set for threshold type alarm", - 
resp.json['error_message']['faultstring']) - - def test_post_invalid_alarm_input_state(self): - json = { - 'name': 'alarm1', - 'state': 'bad_state', - 'type': 'threshold', - 'threshold_rule': { - 'meter_name': 'ameter', - 'comparison_operator': 'gt', - 'threshold': 50.0 - } - } - resp = self.post_json('/alarms', params=json, expect_errors=True, - status=400, headers=self.auth_headers) - expected_err_msg = ("Invalid input for field/attribute state." - " Value: 'bad_state'.") - self.assertIn(expected_err_msg, - resp.json['error_message']['faultstring']) - alarms = list(self.alarm_conn.get_alarms()) - self.assertEqual(3, len(alarms)) - - def test_post_invalid_alarm_input_severity(self): - json = { - 'name': 'alarm1', - 'state': 'ok', - 'severity': 'bad_value', - 'type': 'threshold', - 'threshold_rule': { - 'meter_name': 'ameter', - 'comparison_operator': 'gt', - 'threshold': 50.0 - } - } - resp = self.post_json('/alarms', params=json, expect_errors=True, - status=400, headers=self.auth_headers) - expected_err_msg = ("Invalid input for field/attribute severity." - " Value: 'bad_value'.") - self.assertIn(expected_err_msg, - resp.json['error_message']['faultstring']) - alarms = list(self.alarm_conn.get_alarms()) - self.assertEqual(3, len(alarms)) - - def test_post_invalid_alarm_input_type(self): - json = { - 'name': 'alarm3', - 'state': 'ok', - 'type': 'bad_type', - 'threshold_rule': { - 'meter_name': 'ameter', - 'comparison_operator': 'gt', - 'threshold': 50.0 - } - } - resp = self.post_json('/alarms', params=json, expect_errors=True, - status=400, headers=self.auth_headers) - expected_err_msg = ("Invalid input for field/attribute" - " type." - " Value: 'bad_type'.") - self.assertIn(expected_err_msg, - resp.json['error_message']['faultstring']) - alarms = list(self.alarm_conn.get_alarms()) - self.assertEqual(3, len(alarms)) - - def test_post_invalid_alarm_input_enabled_str(self): - json = { - 'name': 'alarm5', - 'enabled': 'bad_enabled', - 'state': 'ok', - 'type': 'threshold', - 'threshold_rule': { - 'meter_name': 'ameter', - 'comparison_operator': 'gt', - 'threshold': 50.0 - } - } - resp = self.post_json('/alarms', params=json, expect_errors=True, - status=400, headers=self.auth_headers) - expected_err_msg = "Value not an unambiguous boolean: bad_enabled" - self.assertIn(expected_err_msg, - resp.json['error_message']['faultstring']) - alarms = list(self.alarm_conn.get_alarms()) - self.assertEqual(3, len(alarms)) - - def test_post_invalid_alarm_input_enabled_int(self): - json = { - 'name': 'alarm6', - 'enabled': 0, - 'state': 'ok', - 'type': 'threshold', - 'threshold_rule': { - 'meter_name': 'ameter', - 'comparison_operator': 'gt', - 'threshold': 50.0 - } - } - resp = self.post_json('/alarms', params=json, - headers=self.auth_headers) - self.assertFalse(resp.json['enabled']) - alarms = list(self.alarm_conn.get_alarms()) - self.assertEqual(4, len(alarms)) - - def _do_post_alarm_invalid_action(self, ok_actions=None, - alarm_actions=None, - insufficient_data_actions=None, - error_message=None): - - ok_actions = ok_actions or [] - alarm_actions = alarm_actions or [] - insufficient_data_actions = insufficient_data_actions or [] - json = { - 'enabled': False, - 'name': 'added_alarm', - 'state': 'ok', - 'type': 'threshold', - 'ok_actions': ok_actions, - 'alarm_actions': alarm_actions, - 'insufficient_data_actions': insufficient_data_actions, - 'repeat_actions': True, - 'threshold_rule': { - 'meter_name': 'ameter', - 'query': [{'field': 'metadata.field', - 'op': 'eq', - 'value': '5', - 'type': 
'string'}], - 'comparison_operator': 'le', - 'statistic': 'count', - 'threshold': 50, - 'evaluation_periods': '3', - 'period': '180', - } - } - resp = self.post_json('/alarms', params=json, status=400, - headers=self.auth_headers) - alarms = list(self.alarm_conn.get_alarms()) - self.assertEqual(3, len(alarms)) - self.assertEqual(error_message, - resp.json['error_message']['faultstring']) - - def test_post_invalid_alarm_ok_actions(self): - self._do_post_alarm_invalid_action( - ok_actions=['spam://something/ok'], - error_message='Unsupported action spam://something/ok') - - def test_post_invalid_alarm_alarm_actions(self): - self._do_post_alarm_invalid_action( - alarm_actions=['spam://something/alarm'], - error_message='Unsupported action spam://something/alarm') - - def test_post_invalid_alarm_insufficient_data_actions(self): - self._do_post_alarm_invalid_action( - insufficient_data_actions=['spam://something/insufficient'], - error_message='Unsupported action spam://something/insufficient') - - @staticmethod - def _fake_urlsplit(*args, **kwargs): - raise Exception("Evil urlsplit!") - - def test_post_invalid_alarm_actions_format(self): - with mock.patch('oslo_utils.netutils.urlsplit', - self._fake_urlsplit): - self._do_post_alarm_invalid_action( - alarm_actions=['http://[::1'], - error_message='Unable to parse action http://[::1') - - def test_post_alarm_defaults(self): - to_check = { - 'enabled': True, - 'name': 'added_alarm_defaults', - 'ok_actions': [], - 'alarm_actions': [], - 'insufficient_data_actions': [], - 'repeat_actions': False, - } - - json = { - 'name': 'added_alarm_defaults', - 'type': 'threshold', - 'threshold_rule': { - 'meter_name': 'ameter', - 'threshold': 300.0 - } - } - self.post_json('/alarms', params=json, status=201, - headers=self.auth_headers) - alarms = list(self.alarm_conn.get_alarms()) - self.assertEqual(4, len(alarms)) - for alarm in alarms: - if alarm.name == 'added_alarm_defaults': - for key in to_check: - self.assertEqual(to_check[key], - getattr(alarm, key)) - break - else: - self.fail("Alarm not found") - - def test_post_alarm_with_same_name(self): - json = { - 'enabled': False, - 'name': 'dup_alarm_name', - 'state': 'ok', - 'type': 'threshold', - 'ok_actions': ['http://something/ok'], - 'alarm_actions': ['http://something/alarm'], - 'insufficient_data_actions': ['http://something/no'], - 'repeat_actions': True, - 'threshold_rule': { - 'meter_name': 'ameter', - 'query': [{'field': 'metadata.field', - 'op': 'eq', - 'value': '5', - 'type': 'string'}], - 'comparison_operator': 'le', - 'statistic': 'count', - 'threshold': 50, - 'evaluation_periods': '3', - 'period': '180', - } - } - - resp1 = self.post_json('/alarms', params=json, status=201, - headers=self.auth_headers) - resp2 = self.post_json('/alarms', params=json, status=201, - headers=self.auth_headers) - self.assertEqual(resp1.json['name'], resp2.json['name']) - self.assertNotEqual(resp1.json['alarm_id'], resp2.json['alarm_id']) - alarms = self.get_json('/alarms', - headers=self.auth_headers, - q=[{'field': 'name', - 'value': 'dup_alarm_name'}]) - self.assertEqual(2, len(alarms)) - - def _do_test_post_alarm(self, exclude_outliers=None): - json = { - 'enabled': False, - 'name': 'added_alarm', - 'state': 'ok', - 'state_reason': 'ignored', - 'type': 'threshold', - 'severity': 'low', - 'ok_actions': ['http://something/ok'], - 'alarm_actions': ['http://something/alarm'], - 'insufficient_data_actions': ['http://something/no'], - 'repeat_actions': True, - 'threshold_rule': { - 'meter_name': 'ameter', - 'query': 
[{'field': 'metadata.field', - 'op': 'eq', - 'value': '5', - 'type': 'string'}], - 'comparison_operator': 'le', - 'statistic': 'count', - 'threshold': 50, - 'evaluation_periods': '3', - 'period': '180', - } - } - if exclude_outliers is not None: - json['threshold_rule']['exclude_outliers'] = exclude_outliers - - self.post_json('/alarms', params=json, status=201, - headers=self.auth_headers) - alarms = list(self.alarm_conn.get_alarms(enabled=False)) - self.assertEqual(1, len(alarms)) - json['threshold_rule']['query'].append({ - 'field': 'project_id', 'op': 'eq', - 'value': self.auth_headers['X-Project-Id']}) - # to check to IntegerType type conversion - json['threshold_rule']['evaluation_periods'] = 3 - json['threshold_rule']['period'] = 180 - # to check it's read only - json['state_reason'] = "Not evaluated yet" - self._verify_alarm(json, alarms[0], 'added_alarm') - - def test_post_alarm_outlier_exclusion_set(self): - self._do_test_post_alarm(True) - - def test_post_alarm_outlier_exclusion_clear(self): - self._do_test_post_alarm(False) - - def test_post_alarm_outlier_exclusion_defaulted(self): - self._do_test_post_alarm() - - def test_post_alarm_noauth(self): - json = { - 'enabled': False, - 'name': 'added_alarm', - 'state': 'ok', - 'type': 'threshold', - 'severity': 'low', - 'ok_actions': ['http://something/ok'], - 'alarm_actions': ['http://something/alarm'], - 'insufficient_data_actions': ['http://something/no'], - 'repeat_actions': True, - 'threshold_rule': { - 'meter_name': 'ameter', - 'query': [{'field': 'metadata.field', - 'op': 'eq', - 'value': '5', - 'type': 'string'}], - 'comparison_operator': 'le', - 'statistic': 'count', - 'threshold': 50, - 'evaluation_periods': '3', - 'exclude_outliers': False, - 'period': '180', - } - } - self.post_json('/alarms', params=json, status=201) - alarms = list(self.alarm_conn.get_alarms(enabled=False)) - self.assertEqual(1, len(alarms)) - # to check to BoundedInt type conversion - json['threshold_rule']['evaluation_periods'] = 3 - json['threshold_rule']['period'] = 180 - if alarms[0].name == 'added_alarm': - for key in json: - if key.endswith('_rule'): - storage_key = 'rule' - else: - storage_key = key - self.assertEqual(getattr(alarms[0], storage_key), - json[key]) - else: - self.fail("Alarm not found") - - def _do_test_post_alarm_as_admin(self, explicit_project_constraint): - """Test the creation of an alarm as admin for another project.""" - json = { - 'enabled': False, - 'name': 'added_alarm', - 'state': 'ok', - 'type': 'threshold', - 'user_id': 'auseridthatisnotmine', - 'project_id': 'aprojectidthatisnotmine', - 'threshold_rule': { - 'meter_name': 'ameter', - 'query': [{'field': 'metadata.field', - 'op': 'eq', - 'value': '5', - 'type': 'string'}], - 'comparison_operator': 'le', - 'statistic': 'count', - 'threshold': 50, - 'evaluation_periods': 3, - 'period': 180, - } - } - if explicit_project_constraint: - project_constraint = {'field': 'project_id', 'op': 'eq', - 'value': 'aprojectidthatisnotmine'} - json['threshold_rule']['query'].append(project_constraint) - headers = {} - headers.update(self.auth_headers) - headers['X-Roles'] = 'admin' - self.post_json('/alarms', params=json, status=201, - headers=headers) - alarms = list(self.alarm_conn.get_alarms(enabled=False)) - self.assertEqual(1, len(alarms)) - self.assertEqual('auseridthatisnotmine', alarms[0].user_id) - self.assertEqual('aprojectidthatisnotmine', alarms[0].project_id) - self._add_default_threshold_rule(json) - if alarms[0].name == 'added_alarm': - for key in json: - if 
key.endswith('_rule'): - storage_key = 'rule' - if explicit_project_constraint: - self.assertEqual(json[key], - getattr(alarms[0], storage_key)) - else: - query = getattr(alarms[0], storage_key).get('query') - self.assertEqual(2, len(query)) - implicit_constraint = { - u'field': u'project_id', - u'value': u'aprojectidthatisnotmine', - u'op': u'eq' - } - self.assertEqual(implicit_constraint, query[1]) - else: - self.assertEqual(json[key], getattr(alarms[0], key)) - else: - self.fail("Alarm not found") - - def test_post_alarm_as_admin_explicit_project_constraint(self): - """Test the creation of an alarm as admin for another project. - - With an explicit query constraint on the owner's project ID. - """ - self._do_test_post_alarm_as_admin(True) - - def test_post_alarm_as_admin_implicit_project_constraint(self): - """Test the creation of an alarm as admin for another project. - - Test without an explicit query constraint on the owner's project ID. - """ - self._do_test_post_alarm_as_admin(False) - - def test_post_alarm_as_admin_no_user(self): - """Test the creation of an alarm. - - Test the creation of an alarm as admin for another project but - forgetting to set the values. - """ - json = { - 'enabled': False, - 'name': 'added_alarm', - 'state': 'ok', - 'type': 'threshold', - 'project_id': 'aprojectidthatisnotmine', - 'threshold_rule': { - 'meter_name': 'ameter', - 'query': [{'field': 'metadata.field', - 'op': 'eq', - 'value': '5', - 'type': 'string'}, - {'field': 'project_id', 'op': 'eq', - 'value': 'aprojectidthatisnotmine'}], - 'comparison_operator': 'le', - 'statistic': 'count', - 'threshold': 50, - 'evaluation_periods': 3, - 'period': 180, - } - } - headers = {} - headers.update(self.auth_headers) - headers['X-Roles'] = 'admin' - self.post_json('/alarms', params=json, status=201, - headers=headers) - alarms = list(self.alarm_conn.get_alarms(enabled=False)) - self.assertEqual(1, len(alarms)) - self.assertEqual(self.auth_headers['X-User-Id'], alarms[0].user_id) - self.assertEqual('aprojectidthatisnotmine', alarms[0].project_id) - self._verify_alarm(json, alarms[0], 'added_alarm') - - def test_post_alarm_as_admin_no_project(self): - """Test the creation of an alarm. - - Test the creation of an alarm as admin for another project but - forgetting to set the values. 
- """ - json = { - 'enabled': False, - 'name': 'added_alarm', - 'state': 'ok', - 'type': 'threshold', - 'user_id': 'auseridthatisnotmine', - 'threshold_rule': { - 'meter_name': 'ameter', - 'query': [{'field': 'metadata.field', - 'op': 'eq', - 'value': '5', - 'type': 'string'}, - {'field': 'project_id', 'op': 'eq', - 'value': 'aprojectidthatisnotmine'}], - 'comparison_operator': 'le', - 'statistic': 'count', - 'threshold': 50, - 'evaluation_periods': 3, - 'period': 180, - } - } - headers = {} - headers.update(self.auth_headers) - headers['X-Roles'] = 'admin' - self.post_json('/alarms', params=json, status=201, - headers=headers) - alarms = list(self.alarm_conn.get_alarms(enabled=False)) - self.assertEqual(1, len(alarms)) - self.assertEqual('auseridthatisnotmine', alarms[0].user_id) - self.assertEqual(self.auth_headers['X-Project-Id'], - alarms[0].project_id) - self._verify_alarm(json, alarms[0], 'added_alarm') - - @staticmethod - def _alarm_representation_owned_by(identifiers): - json = { - 'name': 'added_alarm', - 'enabled': False, - 'type': 'threshold', - 'ok_actions': ['http://something/ok'], - 'threshold_rule': { - 'meter_name': 'ameter', - 'query': [{'field': 'metadata.field', - 'op': 'eq', - 'value': '5', - 'type': 'string'}], - 'comparison_operator': 'le', - 'statistic': 'count', - 'threshold': 50, - 'evaluation_periods': 3, - 'period': 180, - } - } - for aspect, id in six.iteritems(identifiers): - json['%s_id' % aspect] = id - return json - - def _do_test_post_alarm_as_nonadmin_on_behalf_of_another(self, - identifiers): - """Test posting an alarm. - - Test that posting an alarm as non-admin on behalf of another - user/project fails with an explicit 401 instead of reverting - to the requestor's identity. - """ - json = self._alarm_representation_owned_by(identifiers) - headers = {} - headers.update(self.auth_headers) - headers['X-Roles'] = 'demo' - resp = self.post_json('/alarms', params=json, status=401, - headers=headers) - aspect = 'user' if 'user' in identifiers else 'project' - params = dict(aspect=aspect, id=identifiers[aspect]) - self.assertEqual("Not Authorized to access %(aspect)s %(id)s" % params, - jsonutils.loads(resp.body)['error_message'] - ['faultstring']) - - def test_post_alarm_as_nonadmin_on_behalf_of_another_user(self): - identifiers = dict(user='auseridthatisnotmine') - self._do_test_post_alarm_as_nonadmin_on_behalf_of_another(identifiers) - - def test_post_alarm_as_nonadmin_on_behalf_of_another_project(self): - identifiers = dict(project='aprojectidthatisnotmine') - self._do_test_post_alarm_as_nonadmin_on_behalf_of_another(identifiers) - - def test_post_alarm_as_nonadmin_on_behalf_of_another_creds(self): - identifiers = dict(user='auseridthatisnotmine', - project='aprojectidthatisnotmine') - self._do_test_post_alarm_as_nonadmin_on_behalf_of_another(identifiers) - - def _do_test_post_alarm_as_nonadmin_on_behalf_of_self(self, identifiers): - """Test posting an alarm. - - Test posting an alarm as non-admin on behalf of own user/project - creates alarm associated with the requestor's identity. 
- """ - json = self._alarm_representation_owned_by(identifiers) - headers = {} - headers.update(self.auth_headers) - headers['X-Roles'] = 'demo' - self.post_json('/alarms', params=json, status=201, headers=headers) - alarms = list(self.alarm_conn.get_alarms(enabled=False)) - self.assertEqual(1, len(alarms)) - self.assertEqual(alarms[0].user_id, - self.auth_headers['X-User-Id']) - self.assertEqual(alarms[0].project_id, - self.auth_headers['X-Project-Id']) - - def test_post_alarm_as_nonadmin_on_behalf_of_own_user(self): - identifiers = dict(user=self.auth_headers['X-User-Id']) - self._do_test_post_alarm_as_nonadmin_on_behalf_of_self(identifiers) - - def test_post_alarm_as_nonadmin_on_behalf_of_own_project(self): - identifiers = dict(project=self.auth_headers['X-Project-Id']) - self._do_test_post_alarm_as_nonadmin_on_behalf_of_self(identifiers) - - def test_post_alarm_as_nonadmin_on_behalf_of_own_creds(self): - identifiers = dict(user=self.auth_headers['X-User-Id'], - project=self.auth_headers['X-Project-Id']) - self._do_test_post_alarm_as_nonadmin_on_behalf_of_self(identifiers) - - def test_post_alarm_with_mismatch_between_type_and_rule(self): - """Test the creation of an combination alarm with threshold rule.""" - json = { - 'enabled': False, - 'name': 'added_alarm', - 'state': 'ok', - 'type': 'gnocchi_resources_threshold', - 'ok_actions': ['http://something/ok'], - 'alarm_actions': ['http://something/alarm'], - 'insufficient_data_actions': ['http://something/no'], - 'repeat_actions': True, - 'threshold_rule': { - 'meter_name': 'ameter', - 'query': [{'field': 'metadata.field', - 'op': 'eq', - 'value': '5', - 'type': 'string'}], - 'comparison_operator': 'le', - 'statistic': 'count', - 'threshold': 50, - 'evaluation_periods': '3', - 'period': '180', - } - } - resp = self.post_json('/alarms', params=json, - expect_errors=True, status=400, - headers=self.auth_headers) - self.assertEqual( - "gnocchi_resources_threshold_rule must " - "be set for gnocchi_resources_threshold type alarm", - resp.json['error_message']['faultstring']) - - def test_post_alarm_with_duplicate_actions(self): - body = { - 'name': 'dup-alarm-actions', - 'type': 'threshold', - 'threshold_rule': { - 'meter_name': 'ameter', - 'query': [{'field': 'metadata.field', - 'op': 'eq', - 'value': '5', - 'type': 'string'}], - 'comparison_operator': 'le', - 'statistic': 'count', - 'threshold': 50, - 'evaluation_periods': '3', - 'period': '180', - }, - 'alarm_actions': ['http://no.where', 'http://no.where'] - } - resp = self.post_json('/alarms', params=body, - headers=self.auth_headers) - self.assertEqual(201, resp.status_code) - alarms = list(self.alarm_conn.get_alarms(name='dup-alarm-actions')) - self.assertEqual(1, len(alarms)) - self.assertEqual(['http://no.where'], alarms[0].alarm_actions) - - def test_post_alarm_with_too_many_actions(self): - self.CONF.set_override('alarm_max_actions', 1, group='api') - body = { - 'name': 'alarm-with-many-actions', - 'type': 'threshold', - 'threshold_rule': { - 'meter_name': 'ameter', - 'query': [{'field': 'metadata.field', - 'op': 'eq', - 'value': '5', - 'type': 'string'}], - 'comparison_operator': 'le', - 'statistic': 'count', - 'threshold': 50, - 'evaluation_periods': '3', - 'period': '180', - }, - 'alarm_actions': ['http://no.where', 'http://no.where2'] - } - resp = self.post_json('/alarms', params=body, expect_errors=True, - headers=self.auth_headers) - self.assertEqual(400, resp.status_code) - self.assertEqual("alarm_actions count exceeds maximum value 1", - 
resp.json['error_message']['faultstring']) - - def test_post_alarm_normal_user_set_log_actions(self): - body = { - 'name': 'log_alarm_actions', - 'type': 'threshold', - 'threshold_rule': { - 'meter_name': 'ameter', - 'query': [{'field': 'metadata.field', - 'op': 'eq', - 'value': '5', - 'type': 'string'}], - 'comparison_operator': 'le', - 'statistic': 'count', - 'threshold': 50, - 'evaluation_periods': '3', - 'period': '180', - }, - 'alarm_actions': ['log://'] - } - resp = self.post_json('/alarms', params=body, expect_errors=True, - headers=self.auth_headers) - self.assertEqual(401, resp.status_code) - expected_msg = ("You are not authorized to create action: log://") - self.assertEqual(expected_msg, - resp.json['error_message']['faultstring']) - - def test_post_alarm_normal_user_set_test_actions(self): - body = { - 'name': 'test_alarm_actions', - 'type': 'threshold', - 'threshold_rule': { - 'meter_name': 'ameter', - 'query': [{'field': 'metadata.field', - 'op': 'eq', - 'value': '5', - 'type': 'string'}], - 'comparison_operator': 'le', - 'statistic': 'count', - 'threshold': 50, - 'evaluation_periods': '3', - 'period': '180', - }, - 'alarm_actions': ['test://'] - } - resp = self.post_json('/alarms', params=body, expect_errors=True, - headers=self.auth_headers) - self.assertEqual(401, resp.status_code) - expected_msg = ("You are not authorized to create action: test://") - self.assertEqual(expected_msg, - resp.json['error_message']['faultstring']) - - def test_post_alarm_admin_user_set_log_test_actions(self): - body = { - 'name': 'admin_alarm_actions', - 'type': 'threshold', - 'threshold_rule': { - 'meter_name': 'ameter', - 'query': [{'field': 'metadata.field', - 'op': 'eq', - 'value': '5', - 'type': 'string'}], - 'comparison_operator': 'le', - 'statistic': 'count', - 'threshold': 50, - 'evaluation_periods': '3', - 'period': '180', - }, - 'alarm_actions': ['test://', 'log://'] - } - headers = self.auth_headers - headers['X-Roles'] = 'admin' - self.post_json('/alarms', params=body, status=201, - headers=headers) - alarms = list(self.alarm_conn.get_alarms(name='admin_alarm_actions')) - self.assertEqual(1, len(alarms)) - self.assertEqual(['test://', 'log://'], - alarms[0].alarm_actions) - - def test_exercise_state_reason(self): - body = { - 'name': 'nostate', - 'type': 'threshold', - 'threshold_rule': { - 'meter_name': 'ameter', - 'query': [{'field': 'metadata.field', - 'op': 'eq', - 'value': '5', - 'type': 'string'}], - 'comparison_operator': 'le', - 'statistic': 'count', - 'threshold': 50, - 'evaluation_periods': '3', - 'period': '180', - }, - } - headers = self.auth_headers - headers['X-Roles'] = 'admin' - - self.post_json('/alarms', params=body, status=201, - headers=headers) - alarms = list(self.alarm_conn.get_alarms(name='nostate')) - self.assertEqual(1, len(alarms)) - alarm_id = alarms[0].alarm_id - - alarm = self._get_alarm(alarm_id) - self.assertEqual("insufficient data", alarm['state']) - self.assertEqual("Not evaluated yet", alarm['state_reason']) - - # Ensure state reason is updated - alarm = self._get_alarm('a') - alarm['state'] = 'ok' - self.put_json('/alarms/%s' % alarm_id, - params=alarm, - headers=self.auth_headers) - alarm = self._get_alarm(alarm_id) - self.assertEqual("ok", alarm['state']) - self.assertEqual("Manually set via API", alarm['state_reason']) - - # Ensure state reason read only - alarm = self._get_alarm('a') - alarm['state'] = 'alarm' - alarm['state_reason'] = 'oh no!' 
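The PUT that follows completes the read-only check on `state_reason`: the client-supplied `'oh no!'` is discarded and the server substitutes its own text. The same behaviour can be exercised against a live deployment over plain HTTP; a minimal sketch, assuming a reachable Aodh endpoint and a valid Keystone token (`AODH_URL`, `TOKEN` and `ALARM_ID` are placeholders, not values from this test suite):

```python
import requests

AODH_URL = 'http://localhost:8042'  # placeholder endpoint
TOKEN = 'REDACTED'                  # placeholder Keystone token
ALARM_ID = 'a'                      # placeholder alarm id

headers = {'X-Auth-Token': TOKEN}

# Read the alarm, change its state, and send back a reason of our own.
alarm = requests.get('%s/v2/alarms/%s' % (AODH_URL, ALARM_ID),
                     headers=headers).json()
alarm['state'] = 'alarm'
alarm['state_reason'] = 'oh no!'  # the server is expected to discard this
requests.put('%s/v2/alarms/%s' % (AODH_URL, ALARM_ID),
             headers=headers, json=alarm)

# Per the assertions above, the stored reason is the server's own text.
refreshed = requests.get('%s/v2/alarms/%s' % (AODH_URL, ALARM_ID),
                         headers=headers).json()
assert refreshed['state_reason'] == 'Manually set via API'
```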
- self.put_json('/alarms/%s' % alarm_id, - params=alarm, - headers=self.auth_headers) - - alarm = self._get_alarm(alarm_id) - self.assertEqual("alarm", alarm['state']) - self.assertEqual("Manually set via API", alarm['state_reason']) - - def test_post_alarm_without_actions(self): - body = { - 'name': 'alarm_actions_none', - 'type': 'threshold', - 'threshold_rule': { - 'meter_name': 'ameter', - 'query': [{'field': 'metadata.field', - 'op': 'eq', - 'value': '5', - 'type': 'string'}], - 'comparison_operator': 'le', - 'statistic': 'count', - 'threshold': 50, - 'evaluation_periods': '3', - 'period': '180', - }, - 'alarm_actions': None - } - headers = self.auth_headers - headers['X-Roles'] = 'admin' - self.post_json('/alarms', params=body, status=201, - headers=headers) - alarms = list(self.alarm_conn.get_alarms(name='alarm_actions_none')) - self.assertEqual(1, len(alarms)) - - # FIXME(sileht): This should really returns [] not None - # but SQL just stores the json dict as is... - # migration script for sql will be a mess because we have - # to parse all JSON :( - # I guess we assume that wsme convert the None input to [] - # because of the array type, but it won't... - self.assertIsNone(alarms[0].alarm_actions) - - def test_post_alarm_trust(self): - json = { - 'name': 'added_alarm_defaults', - 'type': 'threshold', - 'ok_actions': ['trust+http://my.server:1234/foo'], - 'threshold_rule': { - 'meter_name': 'ameter', - 'threshold': 300.0 - } - } - auth = mock.Mock() - trust_client = mock.Mock() - with mock.patch('aodh.keystone_client.get_client') as client: - mock_session = mock.Mock() - mock_session.get_user_id.return_value = 'my_user' - client.return_value = mock.Mock(session=mock_session) - with mock.patch('keystoneclient.v3.client.Client') as sub_client: - sub_client.return_value = trust_client - trust_client.trusts.create.return_value = mock.Mock(id='5678') - self.post_json('/alarms', params=json, status=201, - headers=self.auth_headers, - extra_environ={'keystone.token_auth': auth}) - trust_client.trusts.create.assert_called_once_with( - trustor_user=self.auth_headers['X-User-Id'], - trustee_user='my_user', - project=self.auth_headers['X-Project-Id'], - impersonation=True, - role_names=[]) - alarms = list(self.alarm_conn.get_alarms()) - for alarm in alarms: - if alarm.name == 'added_alarm_defaults': - self.assertEqual( - ['trust+http://5678:delete@my.server:1234/foo'], - alarm.ok_actions) - break - else: - self.fail("Alarm not found") - - with mock.patch('aodh.keystone_client.get_client') as client: - client.return_value = mock.Mock( - auth_ref=mock.Mock(user_id='my_user')) - with mock.patch('keystoneclient.v3.client.Client') as sub_client: - sub_client.return_value = trust_client - self.delete('/alarms/%s' % alarm.alarm_id, - headers=self.auth_headers, - status=204, - extra_environ={'keystone.token_auth': auth}) - trust_client.trusts.delete.assert_called_once_with('5678') - - def test_put_alarm(self): - json = { - 'enabled': False, - 'name': 'name_put', - 'state': 'ok', - 'type': 'threshold', - 'severity': 'critical', - 'ok_actions': ['http://something/ok'], - 'alarm_actions': ['http://something/alarm'], - 'insufficient_data_actions': ['http://something/no'], - 'repeat_actions': True, - 'threshold_rule': { - 'meter_name': 'ameter', - 'query': [{'field': 'metadata.field', - 'op': 'eq', - 'value': '5', - 'type': 'string'}], - 'comparison_operator': 'le', - 'statistic': 'count', - 'threshold': 50, - 'evaluation_periods': 3, - 'period': 180, - } - } - data = self.get_json('/alarms', - 
headers=self.auth_headers, - q=[{'field': 'name', - 'value': 'name1', - }]) - self.assertEqual(1, len(data)) - alarm_id = data[0]['alarm_id'] - - self.put_json('/alarms/%s' % alarm_id, - params=json, - headers=self.auth_headers) - alarm = list(self.alarm_conn.get_alarms(alarm_id=alarm_id, - enabled=False))[0] - json['threshold_rule']['query'].append({ - 'field': 'project_id', 'op': 'eq', - 'value': self.auth_headers['X-Project-Id']}) - self._verify_alarm(json, alarm) - - def test_put_alarm_as_admin(self): - json = { - 'user_id': 'myuserid', - 'project_id': 'myprojectid', - 'enabled': False, - 'name': 'name_put', - 'state': 'ok', - 'type': 'threshold', - 'severity': 'critical', - 'ok_actions': ['http://something/ok'], - 'alarm_actions': ['http://something/alarm'], - 'insufficient_data_actions': ['http://something/no'], - 'repeat_actions': True, - 'threshold_rule': { - 'meter_name': 'ameter', - 'query': [{'field': 'metadata.field', - 'op': 'eq', - 'value': '5', - 'type': 'string'}, - {'field': 'project_id', 'op': 'eq', - 'value': 'myprojectid'}], - 'comparison_operator': 'le', - 'statistic': 'count', - 'threshold': 50, - 'evaluation_periods': 3, - 'period': 180, - } - } - headers = {} - headers.update(self.auth_headers) - headers['X-Roles'] = 'admin' - - data = self.get_json('/alarms', - headers=headers, - q=[{'field': 'name', - 'value': 'name1', - }]) - self.assertEqual(1, len(data)) - alarm_id = data[0]['alarm_id'] - - self.put_json('/alarms/%s' % alarm_id, - params=json, - headers=headers) - alarm = list(self.alarm_conn.get_alarms(alarm_id=alarm_id, - enabled=False))[0] - self.assertEqual('myuserid', alarm.user_id) - self.assertEqual('myprojectid', alarm.project_id) - self._verify_alarm(json, alarm) - - def test_put_alarm_wrong_field(self): - json = { - 'this_can_not_be_correct': 'ha', - 'enabled': False, - 'name': 'name1', - 'state': 'ok', - 'type': 'threshold', - 'severity': 'critical', - 'ok_actions': ['http://something/ok'], - 'alarm_actions': ['http://something/alarm'], - 'insufficient_data_actions': ['http://something/no'], - 'repeat_actions': True, - 'threshold_rule': { - 'meter_name': 'ameter', - 'query': [{'field': 'metadata.field', - 'op': 'eq', - 'value': '5', - 'type': 'string'}], - 'comparison_operator': 'le', - 'statistic': 'count', - 'threshold': 50, - 'evaluation_periods': 3, - 'period': 180, - } - } - data = self.get_json('/alarms', - headers=self.auth_headers, - q=[{'field': 'name', - 'value': 'name1', - }]) - self.assertEqual(1, len(data)) - alarm_id = data[0]['alarm_id'] - - resp = self.put_json('/alarms/%s' % alarm_id, - expect_errors=True, - params=json, - headers=self.auth_headers) - self.assertEqual(400, resp.status_code) - - def test_put_alarm_with_existing_name(self): - """Test that update a threshold alarm with an existing name.""" - json = { - 'enabled': False, - 'name': 'name1', - 'state': 'ok', - 'type': 'threshold', - 'severity': 'critical', - 'ok_actions': ['http://something/ok'], - 'alarm_actions': ['http://something/alarm'], - 'insufficient_data_actions': ['http://something/no'], - 'repeat_actions': True, - 'threshold_rule': { - 'meter_name': 'ameter', - 'query': [{'field': 'metadata.field', - 'op': 'eq', - 'value': '5', - 'type': 'string'}], - 'comparison_operator': 'le', - 'statistic': 'count', - 'threshold': 50, - 'evaluation_periods': 3, - 'period': 180, - } - } - data = self.get_json('/alarms', - headers=self.auth_headers, - q=[{'field': 'name', - 'value': 'name2', - }]) - self.assertEqual(1, len(data)) - alarm_id = data[0]['alarm_id'] - - resp = 
self.put_json('/alarms/%s' % alarm_id, - params=json, - headers=self.auth_headers) - self.assertEqual(200, resp.status_code) - - def test_put_invalid_alarm_actions(self): - json = { - 'enabled': False, - 'name': 'name1', - 'state': 'ok', - 'type': 'threshold', - 'severity': 'critical', - 'ok_actions': ['spam://something/ok'], - 'alarm_actions': ['http://something/alarm'], - 'insufficient_data_actions': ['http://something/no'], - 'repeat_actions': True, - 'threshold_rule': { - 'meter_name': 'ameter', - 'query': [{'field': 'metadata.field', - 'op': 'eq', - 'value': '5', - 'type': 'string'}], - 'comparison_operator': 'le', - 'statistic': 'count', - 'threshold': 50, - 'evaluation_periods': 3, - 'period': 180, - } - } - data = self.get_json('/alarms', - headers=self.auth_headers, - q=[{'field': 'name', - 'value': 'name2', - }]) - self.assertEqual(1, len(data)) - alarm_id = data[0]['alarm_id'] - - resp = self.put_json('/alarms/%s' % alarm_id, - expect_errors=True, status=400, - params=json, - headers=self.auth_headers) - self.assertEqual( - 'Unsupported action spam://something/ok', - resp.json['error_message']['faultstring']) - - def test_put_alarm_trust(self): - data = self._get_alarm('a') - data.update({'ok_actions': ['trust+http://something/ok']}) - trust_client = mock.Mock() - with mock.patch('aodh.keystone_client.get_client') as client: - client.return_value = mock.Mock( - auth_ref=mock.Mock(user_id='my_user')) - with mock.patch('keystoneclient.v3.client.Client') as sub_client: - sub_client.return_value = trust_client - trust_client.trusts.create.return_value = mock.Mock(id='5678') - self.put_json('/alarms/%s' % data['alarm_id'], - params=data, - headers=self.auth_headers) - data = self._get_alarm('a') - self.assertEqual( - ['trust+http://5678:delete@something/ok'], data['ok_actions']) - - data.update({'ok_actions': ['http://no-trust-something/ok']}) - - with mock.patch('aodh.keystone_client.get_client') as client: - client.return_value = mock.Mock( - auth_ref=mock.Mock(user_id='my_user')) - with mock.patch('keystoneclient.v3.client.Client') as sub_client: - sub_client.return_value = trust_client - self.put_json('/alarms/%s' % data['alarm_id'], - params=data, - headers=self.auth_headers) - trust_client.trusts.delete.assert_called_once_with('5678') - - data = self._get_alarm('a') - self.assertEqual( - ['http://no-trust-something/ok'], data['ok_actions']) - - def test_delete_alarm(self): - data = self.get_json('/alarms', headers=self.auth_headers) - self.assertEqual(3, len(data)) - - resp = self.delete('/alarms/%s' % data[0]['alarm_id'], - headers=self.auth_headers, - status=204) - self.assertEqual(b'', resp.body) - alarms = list(self.alarm_conn.get_alarms()) - self.assertEqual(2, len(alarms)) - - def test_get_state_alarm(self): - data = self.get_json('/alarms', headers=self.auth_headers) - self.assertEqual(3, len(data)) - - resp = self.get_json('/alarms/%s/state' % data[0]['alarm_id'], - headers=self.auth_headers) - self.assertEqual(resp, data[0]['state']) - - def test_set_state_alarm(self): - data = self.get_json('/alarms', headers=self.auth_headers) - self.assertEqual(3, len(data)) - - resp = self.put_json('/alarms/%s/state' % data[0]['alarm_id'], - headers=self.auth_headers, - params='alarm') - alarms = list(self.alarm_conn.get_alarms(alarm_id=data[0]['alarm_id'])) - self.assertEqual(1, len(alarms)) - self.assertEqual('alarm', alarms[0].state) - self.assertEqual('Manually set via API', - alarms[0].state_reason) - self.assertEqual('alarm', resp.json) - - def 
test_set_invalid_state_alarm(self): - data = self.get_json('/alarms', headers=self.auth_headers) - self.assertEqual(3, len(data)) - - self.put_json('/alarms/%s/state' % data[0]['alarm_id'], - headers=self.auth_headers, - params='not valid', - status=400) - - def test_alarms_sends_notification(self): - # Hit the AlarmsController ... - json = { - 'name': 'sent_notification', - 'type': 'threshold', - 'severity': 'low', - 'threshold_rule': { - 'meter_name': 'ameter', - 'comparison_operator': 'gt', - 'threshold': 2.0, - 'statistic': 'avg', - } - - } - with mock.patch.object(messaging, 'get_notifier') as get_notifier: - notifier = get_notifier.return_value - self.post_json('/alarms', params=json, headers=self.auth_headers) - get_notifier.assert_called_once_with(mock.ANY, - publisher_id='aodh.api') - calls = notifier.info.call_args_list - self.assertEqual(1, len(calls)) - args, _ = calls[0] - context, event_type, payload = args - self.assertEqual('alarm.creation', event_type) - self.assertEqual('sent_notification', payload['detail']['name']) - self.assertEqual('ameter', payload['detail']['rule']['meter_name']) - self.assertTrue(set(['alarm_id', 'detail', 'event_id', 'on_behalf_of', - 'project_id', 'timestamp', 'type', - 'user_id']).issubset(payload.keys())) - - def test_alarm_sends_notification(self): - with mock.patch.object(messaging, 'get_notifier') as get_notifier: - notifier = get_notifier.return_value - self._update_alarm('a', dict(name='new_name')) - get_notifier.assert_called_once_with(mock.ANY, - publisher_id='aodh.api') - calls = notifier.info.call_args_list - self.assertEqual(1, len(calls)) - args, _ = calls[0] - context, event_type, payload = args - self.assertEqual('alarm.rule_change', event_type) - self.assertEqual('new_name', payload['detail']['name']) - self.assertTrue(set(['alarm_id', 'detail', 'event_id', 'on_behalf_of', - 'project_id', 'timestamp', 'type', - 'user_id']).issubset(payload.keys())) - - def test_delete_alarm_sends_notification(self): - with mock.patch.object(messaging, 'get_notifier') as get_notifier: - notifier = get_notifier.return_value - self._delete_alarm(default_alarms(self.auth_headers)[1].alarm_id) - get_notifier.assert_called_once_with(mock.ANY, - publisher_id='aodh.api') - calls = notifier.info.call_args_list - self.assertEqual(1, len(calls)) - args, _ = calls[0] - context, event_type, payload = args - self.assertEqual('alarm.deletion', event_type) - self.assertEqual('insufficient data', payload['detail']['state']) - self.assertTrue(set(['alarm_id', 'detail', 'event_id', 'on_behalf_of', - 'project_id', 'timestamp', 'type', 'severity', - 'user_id']).issubset(payload.keys())) - - -class TestAlarmsHistory(TestAlarmsBase): - - def setUp(self): - super(TestAlarmsHistory, self).setUp() - alarm = models.Alarm( - name='name1', - type='threshold', - enabled=True, - alarm_id='a', - description='a', - state='insufficient data', - state_reason='insufficient data', - severity='critical', - state_timestamp=constants.MIN_DATETIME, - timestamp=constants.MIN_DATETIME, - ok_actions=[], - insufficient_data_actions=[], - alarm_actions=[], - repeat_actions=True, - user_id=self.auth_headers['X-User-Id'], - project_id=self.auth_headers['X-Project-Id'], - time_constraints=[dict(name='testcons', - start='0 11 * * *', - duration=300)], - rule=dict(comparison_operator='gt', - threshold=2.0, - statistic='avg', - evaluation_periods=60, - period=1, - meter_name='meter.test', - query=[dict(field='project_id', - op='eq', - value=self.auth_headers['X-Project-Id']) - ])) - 
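This `setUp` seeds a single threshold alarm with id `'a'`; every case in the class then reads its change log through `/v2/alarms/{id}/history`. Two details recur in the assertions below: history rows carry a `type` field (`'creation'`, `'rule change'`, ...) and a `detail` field that arrives as a JSON-encoded string, so it must be decoded before comparison. A sketch of an additional case in this class's style, using only the helpers the class already provides:

```python
def test_history_detail_round_trip(self):
    # 'a' is the alarm seeded in setUp; the update below produces one
    # 'rule change' row in its history.
    self._update_alarm('a', dict(name='renamed'))
    history = self._get_alarm_history('a')
    for change in history:
        # change['type'] is e.g. 'creation' or 'rule change'; 'detail'
        # arrives JSON-encoded and must be decoded before comparison.
        detail = jsonutils.loads(change['detail'])
        self.assertIsInstance(detail, dict)
```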
self.alarm_conn.create_alarm(alarm) - - def _get_alarm_history(self, alarm_id, auth_headers=None, query=None, - expect_errors=False, status=200): - url = '/alarms/%s/history' % alarm_id - if query: - url += '?q.op=%(op)s&q.value=%(value)s&q.field=%(field)s' % query - resp = self.get_json(url, - headers=auth_headers or self.auth_headers, - expect_errors=expect_errors) - if expect_errors: - self.assertEqual(status, resp.status_code) - return resp - - def _assert_is_subset(self, expected, actual): - for k, v in six.iteritems(expected): - current = actual.get(k) - if k == 'detail' and isinstance(v, dict): - current = jsonutils.loads(current) - self.assertEqual(v, current, 'mismatched field: %s' % k) - self.assertIsNotNone(actual['event_id']) - - def _assert_in_json(self, expected, actual): - actual = jsonutils.dumps(jsonutils.loads(actual), sort_keys=True) - for k, v in six.iteritems(expected): - fragment = jsonutils.dumps({k: v}, sort_keys=True)[1:-1] - self.assertIn(fragment, actual, - '%s not in %s' % (fragment, actual)) - - def test_record_alarm_history_config(self): - self.CONF.set_override('record_history', False) - history = self._get_alarm_history('a') - self.assertEqual([], history) - self._update_alarm('a', dict(name='renamed')) - history = self._get_alarm_history('a') - self.assertEqual([], history) - self.CONF.set_override('record_history', True) - self._update_alarm('a', dict(name='foobar')) - history = self._get_alarm_history('a') - self.assertEqual(1, len(history)) - - def test_record_alarm_history_severity(self): - alarm = self._get_alarm('a') - history = self._get_alarm_history('a') - self.assertEqual([], history) - self.assertEqual('critical', alarm['severity']) - - self._update_alarm('a', dict(severity='low')) - new_alarm = self._get_alarm('a') - history = self._get_alarm_history('a') - self.assertEqual(1, len(history)) - self.assertEqual(jsonutils.dumps({'severity': 'low'}), - history[0]['detail']) - self.assertEqual('low', new_alarm['severity']) - - def test_record_alarm_history_statistic(self): - alarm = self._get_alarm('a') - history = self._get_alarm_history('a') - self.assertEqual([], history) - self.assertEqual('avg', alarm['threshold_rule']['statistic']) - - rule = alarm['threshold_rule'].copy() - rule['statistic'] = 'min' - data = dict(threshold_rule=rule) - self._update_alarm('a', data) - new_alarm = self._get_alarm('a') - history = self._get_alarm_history('a') - self.assertEqual(1, len(history)) - self.assertEqual("min", jsonutils.loads(history[0]['detail']) - ['rule']["statistic"]) - self.assertEqual('min', new_alarm['threshold_rule']['statistic']) - - def test_redundant_update_alarm_property_no_history_change(self): - alarm = self._get_alarm('a') - history = self._get_alarm_history('a') - self.assertEqual([], history) - self.assertEqual('critical', alarm['severity']) - - self._update_alarm('a', dict(severity='low')) - new_alarm = self._get_alarm('a') - history = self._get_alarm_history('a') - self.assertEqual(1, len(history)) - self.assertEqual(jsonutils.dumps({'severity': 'low'}), - history[0]['detail']) - self.assertEqual('low', new_alarm['severity']) - - self._update_alarm('a', dict(severity='low')) - updated_history = self._get_alarm_history('a') - self.assertEqual(1, len(updated_history)) - self.assertEqual(jsonutils.dumps({'severity': 'low'}), - updated_history[0]['detail']) - self.assertEqual(history, updated_history) - - def test_get_recorded_alarm_history_on_create(self): - new_alarm = { - 'name': 'new_alarm', - 'type': 'threshold', - 'threshold_rule': 
{ - 'meter_name': 'ameter', - 'query': [], - 'comparison_operator': 'le', - 'statistic': 'max', - 'threshold': 42.0, - 'period': 60, - 'evaluation_periods': 1, - } - } - self.post_json('/alarms', params=new_alarm, status=201, - headers=self.auth_headers) - - alarms = self.get_json('/alarms', - headers=self.auth_headers, - q=[{'field': 'name', - 'value': 'new_alarm', - }]) - self.assertEqual(1, len(alarms)) - alarm = alarms[0] - - history = self._get_alarm_history(alarm['alarm_id']) - self.assertEqual(1, len(history)) - self._assert_is_subset(dict(alarm_id=alarm['alarm_id'], - on_behalf_of=alarm['project_id'], - project_id=alarm['project_id'], - type='creation', - user_id=alarm['user_id']), - history[0]) - self._add_default_threshold_rule(new_alarm) - new_alarm['rule'] = new_alarm['threshold_rule'] - del new_alarm['threshold_rule'] - new_alarm['rule']['query'].append({ - 'field': 'project_id', 'op': 'eq', - 'value': self.auth_headers['X-Project-Id']}) - self._assert_in_json(new_alarm, history[0]['detail']) - - def _do_test_get_recorded_alarm_history_on_update(self, - data, - type, - detail, - auth=None): - alarm = self._get_alarm('a') - history = self._get_alarm_history('a') - self.assertEqual([], history) - self._update_alarm('a', data, auth) - history = self._get_alarm_history('a') - self.assertEqual(1, len(history)) - project_id = auth['X-Project-Id'] if auth else alarm['project_id'] - user_id = auth['X-User-Id'] if auth else alarm['user_id'] - self._assert_is_subset(dict(alarm_id=alarm['alarm_id'], - detail=detail, - on_behalf_of=alarm['project_id'], - project_id=project_id, - type=type, - user_id=user_id), - history[0]) - - def test_get_recorded_alarm_history_rule_change(self): - data = dict(name='renamed') - detail = '{"name": "renamed"}' - self._do_test_get_recorded_alarm_history_on_update(data, - 'rule change', - detail) - - def test_get_recorded_alarm_history_state_transition_on_behalf_of(self): - # credentials for new non-admin user, on who's behalf the alarm - # is created - member_user = uuidutils.generate_uuid() - member_project = uuidutils.generate_uuid() - member_auth = {'X-Roles': 'member', - 'X-User-Id': member_user, - 'X-Project-Id': member_project} - new_alarm = { - 'name': 'new_alarm', - 'type': 'threshold', - 'state': 'ok', - 'threshold_rule': { - 'meter_name': 'other_meter', - 'query': [{'field': 'project_id', - 'op': 'eq', - 'value': member_project}], - 'comparison_operator': 'le', - 'statistic': 'max', - 'threshold': 42.0, - 'evaluation_periods': 1, - 'period': 60 - } - } - self.post_json('/alarms', params=new_alarm, status=201, - headers=member_auth) - alarm = self.get_json('/alarms', headers=member_auth)[0] - - # effect a state transition as a new administrative user - admin_user = uuidutils.generate_uuid() - admin_project = uuidutils.generate_uuid() - admin_auth = {'X-Roles': 'admin', - 'X-User-Id': admin_user, - 'X-Project-Id': admin_project} - data = dict(state='alarm') - self._update_alarm(alarm['alarm_id'], data, auth_headers=admin_auth) - - self._add_default_threshold_rule(new_alarm) - new_alarm['rule'] = new_alarm['threshold_rule'] - del new_alarm['threshold_rule'] - - # ensure that both the creation event and state transition - # are visible to the non-admin alarm owner and admin user alike - for auth in [member_auth, admin_auth]: - history = self._get_alarm_history(alarm['alarm_id'], - auth_headers=auth) - self.assertEqual(2, len(history), 'hist: %s' % history) - self._assert_is_subset(dict(alarm_id=alarm['alarm_id'], - detail={"state": "alarm", - 
"state_reason": - "Manually set via API"}, - on_behalf_of=alarm['project_id'], - project_id=admin_project, - type='rule change', - user_id=admin_user), - history[0]) - self._assert_is_subset(dict(alarm_id=alarm['alarm_id'], - on_behalf_of=alarm['project_id'], - project_id=member_project, - type='creation', - user_id=member_user), - history[1]) - self._assert_in_json(new_alarm, history[1]['detail']) - - # ensure on_behalf_of cannot be constrained in an API call - query = dict(field='on_behalf_of', - op='eq', - value=alarm['project_id']) - self._get_alarm_history(alarm['alarm_id'], auth_headers=auth, - query=query, expect_errors=True, - status=400) - - def test_get_recorded_alarm_history_segregation(self): - data = dict(name='renamed') - detail = '{"name": "renamed"}' - self._do_test_get_recorded_alarm_history_on_update(data, - 'rule change', - detail) - auth = {'X-Roles': 'member', - 'X-User-Id': uuidutils.generate_uuid(), - 'X-Project-Id': uuidutils.generate_uuid()} - self._get_alarm_history('a', auth_headers=auth, - expect_errors=True, status=404) - - def test_delete_alarm_history_after_deletion(self): - self._update_alarm('a', dict(name='renamed')) - history = self._get_alarm_history('a') - self.assertEqual(1, len(history)) - self.delete('/alarms/%s' % 'a', - headers=self.auth_headers, - status=204) - self._get_alarm_history('a', expect_errors=True, status=404) - - def test_get_alarm_history_ordered_by_recentness(self): - for i in moves.xrange(10): - self._update_alarm('a', dict(name='%s' % i)) - history = self._get_alarm_history('a') - self.assertEqual(10, len(history), 'hist: %s' % history) - self._assert_is_subset(dict(alarm_id='a', - type='rule change'), - history[0]) - for i in moves.xrange(1, 11): - detail = '{"name": "%s"}' % (10 - i) - self._assert_is_subset(dict(alarm_id='a', - detail=detail, - type='rule change'), - history[i - 1]) - - def test_get_alarm_history_constrained_by_timestamp(self): - alarm = self._get_alarm('a') - self._update_alarm('a', dict(name='renamed')) - after = datetime.datetime.utcnow().isoformat() - query = dict(field='timestamp', op='gt', value=after) - history = self._get_alarm_history('a', query=query) - self.assertEqual(0, len(history)) - query['op'] = 'le' - history = self._get_alarm_history('a', query=query) - self.assertEqual(1, len(history)) - detail = '{"name": "renamed"}' - self._assert_is_subset(dict(alarm_id=alarm['alarm_id'], - detail=detail, - on_behalf_of=alarm['project_id'], - project_id=alarm['project_id'], - type='rule change', - user_id=alarm['user_id']), - history[0]) - - def test_get_alarm_history_constrained_by_type(self): - alarm = self._get_alarm('a') - self._update_alarm('a', dict(name='renamed2')) - query = dict(field='type', op='eq', value='rule change') - history = self._get_alarm_history('a', query=query) - self.assertEqual(1, len(history)) - detail = '{"name": "renamed2"}' - self._assert_is_subset(dict(alarm_id=alarm['alarm_id'], - detail=detail, - on_behalf_of=alarm['project_id'], - project_id=alarm['project_id'], - type='rule change', - user_id=alarm['user_id']), - history[0]) - - def test_get_alarm_history_constrained_by_alarm_id_failed(self): - query = dict(field='alarm_id', op='eq', value='a') - resp = self._get_alarm_history('a', query=query, - expect_errors=True, status=400) - msg = ('Unknown argument: "alarm_id": unrecognized' - " field in query: [], valid keys: ['project', " - "'search_offset', 'severity', 'timestamp'," - " 'type', 'user']") - msg = msg.format(key=u'alarm_id', value=u'a') - self.assertEqual(msg, - 
resp.json['error_message']['faultstring']) - - def test_get_alarm_history_constrained_by_not_supported_rule(self): - query = dict(field='abcd', op='eq', value='abcd') - resp = self._get_alarm_history('a', query=query, - expect_errors=True, status=400) - msg = ('Unknown argument: "abcd": unrecognized' - " field in query: [], valid keys: ['project', " - "'search_offset', 'severity', 'timestamp'," - " 'type', 'user']") - msg = msg.format(key=u'abcd', value=u'abcd') - self.assertEqual(msg, - resp.json['error_message']['faultstring']) - - def test_get_alarm_history_constrained_by_severity(self): - self._update_alarm('a', dict(severity='low')) - query = dict(field='severity', op='eq', value='low') - history = self._get_alarm_history('a', query=query) - self.assertEqual(1, len(history)) - self.assertEqual(jsonutils.dumps({'severity': 'low'}), - history[0]['detail']) - - def test_get_nonexistent_alarm_history(self): - self._get_alarm_history('foobar', expect_errors=True, status=404) - - -class TestAlarmsQuotas(TestAlarmsBase): - - def _test_alarm_quota(self): - alarm = { - 'name': 'alarm', - 'type': 'threshold', - 'user_id': self.auth_headers['X-User-Id'], - 'project_id': self.auth_headers['X-Project-Id'], - 'threshold_rule': { - 'meter_name': 'testmeter', - 'query': [], - 'comparison_operator': 'le', - 'statistic': 'max', - 'threshold': 42.0, - 'period': 60, - 'evaluation_periods': 1, - } - } - - resp = self.post_json('/alarms', params=alarm, - headers=self.auth_headers) - self.assertEqual(201, resp.status_code) - alarms = self.get_json('/alarms', headers=self.auth_headers) - self.assertEqual(1, len(alarms)) - - alarm['name'] = 'another_user_alarm' - resp = self.post_json('/alarms', params=alarm, - expect_errors=True, - headers=self.auth_headers) - self.assertEqual(403, resp.status_code) - faultstring = 'Alarm quota exceeded for user' - self.assertIn(faultstring, - resp.json['error_message']['faultstring']) - - alarms = self.get_json('/alarms', headers=self.auth_headers) - self.assertEqual(1, len(alarms)) - - def test_alarms_quotas(self): - self.CONF.set_override('user_alarm_quota', 1, 'api') - self.CONF.set_override('project_alarm_quota', 1, 'api') - self._test_alarm_quota() - - def test_project_alarms_quotas(self): - self.CONF.set_override('project_alarm_quota', 1, 'api') - self._test_alarm_quota() - - def test_user_alarms_quotas(self): - self.CONF.set_override('user_alarm_quota', 1, 'api') - self._test_alarm_quota() - - def test_larger_limit_project_alarms_quotas(self): - self.CONF.set_override('user_alarm_quota', 1, 'api') - self.CONF.set_override('project_alarm_quota', 2, 'api') - self._test_alarm_quota() - - def test_larger_limit_user_alarms_quotas(self): - self.CONF.set_override('user_alarm_quota', 2, 'api') - self.CONF.set_override('project_alarm_quota', 1, 'api') - self._test_alarm_quota() - - def test_larger_limit_user_alarm_quotas_multitenant_user(self): - self.CONF.set_override('user_alarm_quota', 2, 'api') - self.CONF.set_override('project_alarm_quota', 1, 'api') - - def _test(field, value): - query = [{ - 'field': field, - 'op': 'eq', - 'value': value - }] - alarms = self.get_json('/alarms', q=query, - headers=self.auth_headers) - self.assertEqual(1, len(alarms)) - - alarm = { - 'name': 'alarm', - 'type': 'threshold', - 'user_id': self.auth_headers['X-User-Id'], - 'project_id': self.auth_headers['X-Project-Id'], - 'threshold_rule': { - 'meter_name': 'testmeter', - 'query': [], - 'comparison_operator': 'le', - 'statistic': 'max', - 'threshold': 42.0, - 'period': 60, - 
'evaluation_periods': 1, - } - } - - resp = self.post_json('/alarms', params=alarm, - headers=self.auth_headers) - - self.assertEqual(201, resp.status_code) - _test('project_id', self.auth_headers['X-Project-Id']) - - self.auth_headers['X-Project-Id'] = uuidutils.generate_uuid() - alarm['name'] = 'another_user_alarm' - alarm['project_id'] = self.auth_headers['X-Project-Id'] - resp = self.post_json('/alarms', params=alarm, - headers=self.auth_headers) - - self.assertEqual(201, resp.status_code) - _test('project_id', self.auth_headers['X-Project-Id']) - - self.auth_headers["X-roles"] = "admin" - alarms = self.get_json('/alarms', headers=self.auth_headers) - self.assertEqual(2, len(alarms)) - - -class TestAlarmsRuleThreshold(TestAlarmsBase): - - def test_post_invalid_alarm_statistic(self): - json = { - 'name': 'added_alarm', - 'type': 'threshold', - 'threshold_rule': { - 'meter_name': 'ameter', - 'comparison_operator': 'gt', - 'threshold': 2.0, - 'statistic': 'magic', - } - } - resp = self.post_json('/alarms', params=json, expect_errors=True, - status=400, headers=self.auth_headers) - expected_err_msg = ("Invalid input for field/attribute" - " statistic. Value: 'magic'.") - self.assertIn(expected_err_msg, - resp.json['error_message']['faultstring']) - alarms = list(self.alarm_conn.get_alarms()) - self.assertEqual(0, len(alarms)) - - def test_post_invalid_alarm_input_comparison_operator(self): - json = { - 'name': 'alarm2', - 'state': 'ok', - 'type': 'threshold', - 'threshold_rule': { - 'meter_name': 'ameter', - 'comparison_operator': 'bad_co', - 'threshold': 50.0 - } - } - resp = self.post_json('/alarms', params=json, expect_errors=True, - status=400, headers=self.auth_headers) - expected_err_msg = ("Invalid input for field/attribute" - " comparison_operator." - " Value: 'bad_co'.") - self.assertIn(expected_err_msg, - resp.json['error_message']['faultstring']) - alarms = list(self.alarm_conn.get_alarms()) - self.assertEqual(0, len(alarms)) - - def test_post_invalid_alarm_query(self): - json = { - 'name': 'added_alarm', - 'type': 'threshold', - 'threshold_rule': { - 'meter_name': 'ameter', - 'query': [{'field': 'metadata.invalid', - 'field': 'gt', - 'value': 'value'}], - 'comparison_operator': 'gt', - 'threshold': 2.0, - 'statistic': 'avg', - } - } - self.post_json('/alarms', params=json, expect_errors=True, status=400, - headers=self.auth_headers) - alarms = list(self.alarm_conn.get_alarms()) - self.assertEqual(0, len(alarms)) - - def test_post_invalid_alarm_query_field_type(self): - json = { - 'name': 'added_alarm', - 'type': 'threshold', - 'threshold_rule': { - 'meter_name': 'ameter', - 'query': [{'field': 'metadata.valid', - 'op': 'eq', - 'value': 'value', - 'type': 'blob'}], - 'comparison_operator': 'gt', - 'threshold': 2.0, - 'statistic': 'avg', - } - } - resp = self.post_json('/alarms', params=json, expect_errors=True, - status=400, headers=self.auth_headers) - expected_error_message = 'The data type blob is not supported.' 
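The faultstring checked next pins down that an explicit query `type` of `blob` is rejected. For contrast, a query item with an explicit type the API is assumed to accept; the accepted set is taken to be the usual one for this API family (`string`, `integer`, `float`, `boolean`, `datetime`), and the field name is illustrative:

```python
# Illustrative only: a typed query item the API is assumed to accept.
valid_query = [{'field': 'metadata.size',
                'op': 'gt',
                'value': '42',
                'type': 'integer'}]
```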
- resp_string = jsonutils.loads(resp.body) - fault_string = resp_string['error_message']['faultstring'] - self.assertTrue(fault_string.startswith(expected_error_message)) - alarms = list(self.alarm_conn.get_alarms()) - self.assertEqual(0, len(alarms)) - - def test_post_invalid_alarm_query_non_field(self): - json = { - 'name': 'added_alarm', - 'type': 'threshold', - 'threshold_rule': { - 'meter_name': 'ameter', - 'query': [{'q.field': 'metadata.valid', - 'value': 'value'}], - 'threshold': 2.0, - } - } - resp = self.post_json('/alarms', params=json, expect_errors=True, - status=400, headers=self.auth_headers) - expected_error_message = ("Unknown attribute for argument " - "data.threshold_rule.query: q.field") - fault_string = resp.json['error_message']['faultstring'] - self.assertEqual(expected_error_message, fault_string) - alarms = list(self.alarm_conn.get_alarms()) - self.assertEqual(0, len(alarms)) - - def test_post_invalid_alarm_query_non_value(self): - json = { - 'name': 'added_alarm', - 'type': 'threshold', - 'threshold_rule': { - 'meter_name': 'ameter', - 'query': [{'field': 'metadata.valid', - 'q.value': 'value'}], - 'threshold': 2.0, - } - } - resp = self.post_json('/alarms', params=json, expect_errors=True, - status=400, headers=self.auth_headers) - expected_error_message = ("Unknown attribute for argument " - "data.threshold_rule.query: q.value") - fault_string = resp.json['error_message']['faultstring'] - self.assertEqual(expected_error_message, fault_string) - alarms = list(self.alarm_conn.get_alarms()) - self.assertEqual(0, len(alarms)) - - def test_post_invalid_alarm_timestamp_in_threshold_rule(self): - date_time = datetime.datetime(2012, 7, 2, 10, 41) - isotime = date_time.isoformat() - - json = { - 'name': 'invalid_alarm', - 'type': 'threshold', - 'threshold_rule': { - 'meter_name': 'ameter', - 'query': [{'field': 'timestamp', - 'op': 'gt', - 'value': isotime}], - 'comparison_operator': 'gt', - 'threshold': 2.0, - } - } - resp = self.post_json('/alarms', params=json, expect_errors=True, - status=400, headers=self.auth_headers) - alarms = list(self.alarm_conn.get_alarms()) - self.assertEqual(0, len(alarms)) - self.assertEqual( - 'Unknown argument: "timestamp": ' - 'not valid for this resource', - resp.json['error_message']['faultstring']) - - def test_post_threshold_rule_defaults(self): - to_check = { - 'name': 'added_alarm_defaults', - 'state': 'insufficient data', - 'description': ('Alarm when ameter is eq a avg of ' - '300.0 over 60 seconds'), - 'type': 'threshold', - 'threshold_rule': { - 'meter_name': 'ameter', - 'query': [{'field': 'project_id', - 'op': 'eq', - 'value': self.auth_headers['X-Project-Id']}], - 'threshold': 300.0, - 'comparison_operator': 'eq', - 'statistic': 'avg', - 'evaluation_periods': 1, - 'period': 60, - } - - } - self._add_default_threshold_rule(to_check) - - json = { - 'name': 'added_alarm_defaults', - 'type': 'threshold', - 'threshold_rule': { - 'meter_name': 'ameter', - 'threshold': 300.0 - } - } - self.post_json('/alarms', params=json, status=201, - headers=self.auth_headers) - alarms = list(self.alarm_conn.get_alarms()) - self.assertEqual(1, len(alarms)) - for alarm in alarms: - if alarm.name == 'added_alarm_defaults': - for key in to_check: - if key.endswith('_rule'): - storage_key = 'rule' - else: - storage_key = key - self.assertEqual(to_check[key], - getattr(alarm, storage_key)) - break - else: - self.fail("Alarm not found") - - -class TestAlarmsRuleGnocchi(TestAlarmsBase): - - def setUp(self): - super(TestAlarmsRuleGnocchi, self).setUp() 
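This `setUp` seeds one alarm per Gnocchi rule flavour, as created in the loop below. An informal gloss of how the three alarm types differ (the keys are real aodh alarm types; the values are descriptions, not aodh code):

```python
# Informal gloss of the three rule kinds seeded below.
GNOCCHI_RULE_KINDS = {
    'gnocchi_resources_threshold':
        'one metric on a single resource_id',
    'gnocchi_aggregation_by_metrics_threshold':
        'an explicit list of metric ids',
    'gnocchi_aggregation_by_resources_threshold':
        'a metric aggregated across resources matching a JSON query',
}
```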
- for alarm in [ - models.Alarm(name='name1', - type='gnocchi_resources_threshold', - enabled=True, - alarm_id='e', - description='e', - state='insufficient data', - state_reason='Not evaluated', - severity='critical', - state_timestamp=constants.MIN_DATETIME, - timestamp=constants.MIN_DATETIME, - ok_actions=[], - insufficient_data_actions=[], - alarm_actions=[], - repeat_actions=True, - user_id=self.auth_headers['X-User-Id'], - project_id=self.auth_headers['X-Project-Id'], - time_constraints=[], - rule=dict(comparison_operator='gt', - threshold=2.0, - aggregation_method='mean', - granularity=60, - evaluation_periods=1, - metric='meter.test', - resource_type='instance', - resource_id=( - '6841c175-d7c4-4bc2-bc7a-1c7832271b8f'), - ) - ), - models.Alarm(name='name2', - type='gnocchi_aggregation_by_metrics_threshold', - enabled=True, - alarm_id='f', - description='f', - state='insufficient data', - state_reason='Not evaluated', - severity='critical', - state_timestamp=constants.MIN_DATETIME, - timestamp=constants.MIN_DATETIME, - ok_actions=[], - insufficient_data_actions=[], - alarm_actions=[], - repeat_actions=True, - user_id=self.auth_headers['X-User-Id'], - project_id=self.auth_headers['X-Project-Id'], - time_constraints=[], - rule=dict(comparison_operator='gt', - threshold=2.0, - aggregation_method='mean', - evaluation_periods=1, - granularity=60, - metrics=[ - '41869681-5776-46d6-91ed-cccc43b6e4e3', - 'a1fb80f4-c242-4f57-87c6-68f47521059e'] - ), - ), - models.Alarm(name='name3', - type='gnocchi_aggregation_by_resources_threshold', - enabled=True, - alarm_id='g', - description='f', - state='insufficient data', - state_reason='Not evaluated', - severity='critical', - state_timestamp=constants.MIN_DATETIME, - timestamp=constants.MIN_DATETIME, - ok_actions=[], - insufficient_data_actions=[], - alarm_actions=[], - repeat_actions=True, - user_id=self.auth_headers['X-User-Id'], - project_id=self.auth_headers['X-Project-Id'], - time_constraints=[], - rule=dict(comparison_operator='gt', - threshold=2.0, - aggregation_method='mean', - granularity=60, - evaluation_periods=1, - metric='meter.test', - resource_type='instance', - query='{"=": {"server_group": ' - '"my_autoscaling_group"}}') - ), - - ]: - - self.alarm_conn.create_alarm(alarm) - - def test_list_alarms(self): - data = self.get_json('/alarms', headers=self.auth_headers) - self.assertEqual(3, len(data)) - self.assertEqual(set(['name1', 'name2', 'name3']), - set(r['name'] for r in data)) - self.assertEqual(set(['meter.test']), - set(r['gnocchi_resources_threshold_rule']['metric'] - for r in data - if 'gnocchi_resources_threshold_rule' in r)) - - def test_post_gnocchi_metrics_alarm_cached(self): - # NOTE(gordc): cache is a decorator and therefore, gets mocked across - # entire scenario. ideally we should test both scenario but tough. - # assume cache will return aggregation_method == ['count'] always. 
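The test that follows posts the same alarm twice: the first POST triggers a capabilities lookup against Gnocchi, while the second is expected to be answered entirely from the cache (hence the `assertFalse(clientlib.called)` at the end). A minimal sketch of the memoisation pattern being relied on, assuming Python 3's `functools.lru_cache`; the function names are illustrative, not aodh's actual implementation:

```python
import functools

def fetch_capabilities(endpoint):
    # Stand-in for the real Gnocchi capabilities call, so the sketch is
    # self-contained; the real code talks to the Gnocchi client.
    return {'aggregation_methods': ['count', 'mean']}

@functools.lru_cache(maxsize=None)
def aggregation_methods(endpoint):
    # The first call per endpoint performs the lookup; repeat calls are
    # answered from the cache, which is why the second POST in the test
    # never touches the mocked client.
    return tuple(fetch_capabilities(endpoint)['aggregation_methods'])
```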
- json = { - 'enabled': False, - 'name': 'name_post', - 'state': 'ok', - 'type': 'gnocchi_aggregation_by_metrics_threshold', - 'severity': 'critical', - 'ok_actions': ['http://something/ok'], - 'alarm_actions': ['http://something/alarm'], - 'insufficient_data_actions': ['http://something/no'], - 'repeat_actions': True, - 'gnocchi_aggregation_by_metrics_threshold_rule': { - 'metrics': ['b3d9d8ab-05e8-439f-89ad-5e978dd2a5eb', - '009d4faf-c275-46f0-8f2d-670b15bac2b0'], - 'comparison_operator': 'le', - 'aggregation_method': 'count', - 'threshold': 50, - 'evaluation_periods': 3, - 'granularity': 180, - } - } - - with mock.patch('aodh.api.controllers.v2.alarm_rules.' - 'gnocchi.client') as clientlib: - c = clientlib.Client.return_value - c.capabilities.list.return_value = { - 'aggregation_methods': ['count']} - self.post_json('/alarms', params=json, headers=self.auth_headers) - - with mock.patch('aodh.api.controllers.v2.alarm_rules.' - 'gnocchi.client') as clientlib: - self.post_json('/alarms', params=json, headers=self.auth_headers) - self.assertFalse(clientlib.called) - - def test_post_gnocchi_resources_alarm(self): - json = { - 'enabled': False, - 'name': 'name_post', - 'state': 'ok', - 'type': 'gnocchi_resources_threshold', - 'severity': 'critical', - 'ok_actions': ['http://something/ok'], - 'alarm_actions': ['http://something/alarm'], - 'insufficient_data_actions': ['http://something/no'], - 'repeat_actions': True, - 'gnocchi_resources_threshold_rule': { - 'metric': 'ameter', - 'comparison_operator': 'le', - 'aggregation_method': 'count', - 'threshold': 50, - 'evaluation_periods': 3, - 'granularity': 180, - 'resource_type': 'instance', - 'resource_id': '209ef69c-c10c-4efb-90ff-46f4b2d90d2e', - } - } - - with mock.patch('aodh.api.controllers.v2.alarm_rules.' - 'gnocchi.client') as clientlib: - c = clientlib.Client.return_value - c.capabilities.list.return_value = { - 'aggregation_methods': ['count']} - self.post_json('/alarms', params=json, headers=self.auth_headers) - - alarms = list(self.alarm_conn.get_alarms(enabled=False)) - self.assertEqual(1, len(alarms)) - self._verify_alarm(json, alarms[0]) - - def test_post_gnocchi_metrics_alarm(self): - json = { - 'enabled': False, - 'name': 'name_post', - 'state': 'ok', - 'type': 'gnocchi_aggregation_by_metrics_threshold', - 'severity': 'critical', - 'ok_actions': ['http://something/ok'], - 'alarm_actions': ['http://something/alarm'], - 'insufficient_data_actions': ['http://something/no'], - 'repeat_actions': True, - 'gnocchi_aggregation_by_metrics_threshold_rule': { - 'metrics': ['b3d9d8ab-05e8-439f-89ad-5e978dd2a5eb', - '009d4faf-c275-46f0-8f2d-670b15bac2b0'], - 'comparison_operator': 'le', - 'aggregation_method': 'count', - 'threshold': 50, - 'evaluation_periods': 3, - 'granularity': 180, - } - } - - with mock.patch('aodh.api.controllers.v2.alarm_rules.' 
-                        'gnocchi.client') as clientlib:
-            c = clientlib.Client.return_value
-            c.capabilities.list.return_value = {
-                'aggregation_methods': ['count']}
-
-            self.post_json('/alarms', params=json, headers=self.auth_headers)
-
-        alarms = list(self.alarm_conn.get_alarms(enabled=False))
-        self.assertEqual(1, len(alarms))
-        self._verify_alarm(json, alarms[0])
-
-    @mock.patch('aodh.keystone_client.get_client')
-    def test_post_gnocchi_aggregation_alarm_project_constraint(self,
-                                                               get_client):
-        json = {
-            'enabled': False,
-            'name': 'project_constraint',
-            'state': 'ok',
-            'type': 'gnocchi_aggregation_by_resources_threshold',
-            'severity': 'critical',
-            'ok_actions': ['http://something/ok'],
-            'alarm_actions': ['http://something/alarm'],
-            'insufficient_data_actions': ['http://something/no'],
-            'repeat_actions': True,
-            'gnocchi_aggregation_by_resources_threshold_rule': {
-                'metric': 'ameter',
-                'comparison_operator': 'le',
-                'aggregation_method': 'count',
-                'threshold': 50,
-                'evaluation_periods': 3,
-                'granularity': 180,
-                'resource_type': 'instance',
-                'query': '{"=": {"server_group": "my_autoscaling_group"}}',
-            }
-        }
-
-        expected_query = {"and": [
-            {"or": [
-                {"=": {"created_by_project_id":
-                       self.auth_headers['X-Project-Id']}},
-                {"and": [
-                    {"=": {"created_by_project_id": ""}},
-                    {"=": {"project_id": self.auth_headers['X-Project-Id']}}
-                ]},
-            ]},
-            {"=": {"server_group": "my_autoscaling_group"}},
-        ]}
-
-        ks_client = mock.Mock()
-        ks_client.projects.find.return_value = mock.Mock(id='')
-        get_client.return_value = ks_client
-
-        with mock.patch('aodh.api.controllers.v2.alarm_rules.'
-                        'gnocchi.client') as clientlib:
-            c = clientlib.Client.return_value
-            c.capabilities.list.return_value = {
-                'aggregation_methods': ['count']}
-            self.post_json('/alarms', params=json, headers=self.auth_headers)
-
-        self.assertEqual([mock.call(
-            aggregation='count',
-            metrics='ameter',
-            needed_overlap=0,
-            query=expected_query,
-            resource_type="instance")],
-            c.metric.aggregation.mock_calls)
-
-        alarms = list(self.alarm_conn.get_alarms(enabled=False))
-        self.assertEqual(1, len(alarms))
-
-        json['gnocchi_aggregation_by_resources_threshold_rule']['query'] = (
-            jsonutils.dumps(expected_query))
-        self._verify_alarm(json, alarms[0])
-
-
-class TestAlarmsEvent(TestAlarmsBase):
-
-    def test_list_alarms(self):
-        alarm = models.Alarm(name='event.alarm.1',
-                             type='event',
-                             enabled=True,
-                             alarm_id='h',
-                             description='h',
-                             state='insufficient data',
-                             state_reason='insufficient data',
-                             severity='moderate',
-                             state_timestamp=constants.MIN_DATETIME,
-                             timestamp=constants.MIN_DATETIME,
-                             ok_actions=[],
-                             insufficient_data_actions=[],
-                             alarm_actions=[],
-                             repeat_actions=False,
-                             user_id=self.auth_headers['X-User-Id'],
-                             project_id=self.auth_headers['X-Project-Id'],
-                             time_constraints=[],
-                             rule=dict(event_type='event.test',
-                                       query=[]),
-                             )
-        self.alarm_conn.create_alarm(alarm)
-
-        data = self.get_json('/alarms', headers=self.auth_headers)
-        self.assertEqual(1, len(data))
-        self.assertEqual(set(['event.alarm.1']),
-                         set(r['name'] for r in data))
-        self.assertEqual(set(['event.test']),
-                         set(r['event_rule']['event_type']
-                             for r in data if 'event_rule' in r))
-
-    def test_post_event_alarm_defaults(self):
-        to_check = {
-            'enabled': True,
-            'name': 'added_alarm_defaults',
-            'state': 'insufficient data',
-            'description': 'Alarm when * event occurred.',
-            'type': 'event',
-            'ok_actions': [],
-            'alarm_actions': [],
-            'insufficient_data_actions': [],
-            'repeat_actions': False,
-            'rule': {
-                'event_type': '*',
-                'query': [],
-            }
-        }
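The to_check mapping above spells out every default the API is expected to fill in, while the POST that follows supplies only a name, a type, and a wildcard event rule. A rough sketch of how such defaults might be merged server-side (hypothetical helper names, not aodh's actual code):

    EVENT_ALARM_DEFAULTS = {
        'enabled': True,
        'state': 'insufficient data',
        'ok_actions': [],
        'alarm_actions': [],
        'insufficient_data_actions': [],
        'repeat_actions': False,
    }

    def apply_event_alarm_defaults(posted):
        # Fill in anything the caller omitted, and derive the default
        # description ('Alarm when * event occurred.') from the event type.
        alarm = dict(EVENT_ALARM_DEFAULTS, **posted)
        event_type = alarm.get('event_rule', {}).get('event_type', '*')
        alarm.setdefault('description',
                         'Alarm when %s event occurred.' % event_type)
        return alarm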
- - json = { - 'name': 'added_alarm_defaults', - 'type': 'event', - 'event_rule': { - 'event_type': '*', - 'query': [] - } - } - self.post_json('/alarms', params=json, status=201, - headers=self.auth_headers) - alarms = list(self.alarm_conn.get_alarms()) - self.assertEqual(1, len(alarms)) - for alarm in alarms: - if alarm.name == 'added_alarm_defaults': - for key in to_check: - self.assertEqual(to_check[key], getattr(alarm, key)) - break - else: - self.fail("Alarm not found") - - -class TestAlarmsCompositeRule(TestAlarmsBase): - - def setUp(self): - super(TestAlarmsCompositeRule, self).setUp() - self.sub_rule1 = { - "type": "threshold", - "meter_name": "cpu_util", - "evaluation_periods": 5, - "threshold": 0.8, - "query": [{ - "field": "metadata.metering.stack_id", - "value": "36b20eb3-d749-4964-a7d2-a71147cd8147", - "op": "eq" - }], - "statistic": "avg", - "period": 60, - "exclude_outliers": False, - "comparison_operator": "gt" - } - self.sub_rule2 = { - "type": "threshold", - "meter_name": "disk.iops", - "evaluation_periods": 4, - "threshold": 200, - "query": [{ - "field": "metadata.metering.stack_id", - "value": "36b20eb3-d749-4964-a7d2-a71147cd8147", - "op": "eq" - }], - "statistic": "max", - "period": 60, - "exclude_outliers": False, - "comparison_operator": "gt" - } - self.sub_rule3 = { - "type": "threshold", - "meter_name": "network.incoming.packets.rate", - "evaluation_periods": 3, - "threshold": 1000, - "query": [{ - "field": "metadata.metering.stack_id", - "value": - "36b20eb3-d749-4964-a7d2-a71147cd8147", - "op": "eq" - }], - "statistic": "avg", - "period": 60, - "exclude_outliers": False, - "comparison_operator": "gt" - } - - self.rule = { - "or": [self.sub_rule1, - { - "and": [self.sub_rule2, self.sub_rule3] - }]} - - def test_list_alarms(self): - alarm = models.Alarm(name='composite_alarm', - type='composite', - enabled=True, - alarm_id='composite', - description='composite', - state='insufficient data', - state_reason='insufficient data', - severity='moderate', - state_timestamp=constants.MIN_DATETIME, - timestamp=constants.MIN_DATETIME, - ok_actions=[], - insufficient_data_actions=[], - alarm_actions=[], - repeat_actions=False, - user_id=self.auth_headers['X-User-Id'], - project_id=self.auth_headers['X-Project-Id'], - time_constraints=[], - rule=self.rule, - ) - self.alarm_conn.create_alarm(alarm) - - data = self.get_json('/alarms', headers=self.auth_headers) - self.assertEqual(1, len(data)) - self.assertEqual(set(['composite_alarm']), - set(r['name'] for r in data)) - self.assertEqual(self.rule, data[0]['composite_rule']) - - def test_post_with_composite_rule(self): - json = { - "type": "composite", - "name": "composite_alarm", - "composite_rule": self.rule, - "repeat_actions": False - } - self.post_json('/alarms', params=json, status=201, - headers=self.auth_headers) - alarms = list(self.alarm_conn.get_alarms()) - self.assertEqual(1, len(alarms)) - self.assertEqual(self.rule, alarms[0].rule) - - def test_post_with_sub_rule_with_wrong_type(self): - self.sub_rule1['type'] = 'non-type' - json = { - "type": "composite", - "name": "composite_alarm", - "composite_rule": self.rule, - "repeat_actions": False - } - response = self.post_json('/alarms', params=json, status=400, - expect_errors=True, - headers=self.auth_headers) - - err = ("Unsupported sub-rule type :non-type in composite " - "rule, should be one of: " - "['gnocchi_aggregation_by_metrics_threshold', " - "'gnocchi_aggregation_by_resources_threshold', " - "'gnocchi_resources_threshold', 'threshold']") - faultstring = 
response.json['error_message']['faultstring'] - self.assertEqual(err, faultstring) - - def test_post_with_sub_rule_with_only_required_params(self): - sub_rulea = { - "meter_name": "cpu_util", - "threshold": 0.8, - "type": "threshold"} - sub_ruleb = { - "meter_name": "disk.iops", - "threshold": 200, - "type": "threshold"} - json = { - "type": "composite", - "name": "composite_alarm", - "composite_rule": {"and": [sub_rulea, sub_ruleb]}, - "repeat_actions": False - } - self.post_json('/alarms', params=json, status=201, - headers=self.auth_headers) - alarms = list(self.alarm_conn.get_alarms()) - self.assertEqual(1, len(alarms)) - - def test_post_with_sub_rule_with_invalid_params(self): - self.sub_rule1['threshold'] = False - json = { - "type": "composite", - "name": "composite_alarm", - "composite_rule": self.rule, - "repeat_actions": False - } - response = self.post_json('/alarms', params=json, status=400, - expect_errors=True, - headers=self.auth_headers) - faultstring = ("Invalid input for field/attribute threshold. " - "Value: 'False'. Wrong type. Expected '%s', got '%s'" - % (type(1.0), type(True))) - self.assertEqual(faultstring, - response.json['error_message']['faultstring']) - - -class TestPaginationQuery(TestAlarmsBase): - def setUp(self): - super(TestPaginationQuery, self).setUp() - for alarm in default_alarms(self.auth_headers): - self.alarm_conn.create_alarm(alarm) - - def test_pagination_query_single_sort(self): - data = self.get_json('/alarms?sort=name:desc', - headers=self.auth_headers) - names = [a['name'] for a in data] - self.assertEqual(['name3', 'name2', 'name1'], names) - data = self.get_json('/alarms?sort=name:asc', - headers=self.auth_headers) - names = [a['name'] for a in data] - self.assertEqual(['name1', 'name2', 'name3'], names) - - def test_sort_by_severity_with_its_value(self): - if self.engine != "mysql": - self.skipTest("This is only implemented for MySQL") - data = self.get_json('/alarms?sort=severity:asc', - headers=self.auth_headers) - severities = [a['severity'] for a in data] - self.assertEqual(['moderate', 'critical', 'critical'], - severities) - data = self.get_json('/alarms?sort=severity:desc', - headers=self.auth_headers) - severities = [a['severity'] for a in data] - self.assertEqual(['critical', 'critical', 'moderate'], - severities) - - def test_pagination_query_limit(self): - data = self.get_json('/alarms?limit=2', headers=self.auth_headers) - self.assertEqual(2, len(data)) - - def test_pagination_query_limit_sort(self): - data = self.get_json('/alarms?sort=name:asc&limit=2', - headers=self.auth_headers) - self.assertEqual(2, len(data)) - - def test_pagination_query_marker(self): - data = self.get_json('/alarms?sort=name:desc', - headers=self.auth_headers) - self.assertEqual(3, len(data)) - alarm_ids = [a['alarm_id'] for a in data] - names = [a['name'] for a in data] - self.assertEqual(['name3', 'name2', 'name1'], names) - marker_url = ('/alarms?sort=name:desc&marker=%s' % alarm_ids[1]) - data = self.get_json(marker_url, headers=self.auth_headers) - self.assertEqual(1, len(data)) - new_alarm_ids = [a['alarm_id'] for a in data] - self.assertEqual(alarm_ids[2:], new_alarm_ids) - new_names = [a['name'] for a in data] - self.assertEqual(['name1'], new_names) - - def test_pagination_query_multiple_sorts(self): - new_alarms = default_alarms(self.auth_headers) - for a_id in zip(new_alarms, ['e', 'f', 'g', 'h']): - a_id[0].alarm_id = a_id[1] - self.alarm_conn.create_alarm(a_id[0]) - data = self.get_json('/alarms', headers=self.auth_headers) - 
self.assertEqual(6, len(data)) - sort_url = '/alarms?sort=name:desc&sort=alarm_id:asc' - data = self.get_json(sort_url, headers=self.auth_headers) - name_ids = [(a['name'], a['alarm_id']) for a in data] - expected = [('name3', 'c'), - ('name3', 'g'), ('name2', 'b'), ('name2', 'f'), - ('name1', 'a'), ('name1', 'e')] - self.assertEqual(expected, name_ids) - - def test_pagination_query_invalid_sort_key(self): - resp = self.get_json('/alarms?sort=invalid_key:desc', - headers=self.auth_headers, - expect_errors=True) - self.assertEqual(resp.status_code, 400) - self.assertEqual("Invalid input for field/attribute sort. Value: " - "'invalid_key:desc'. the sort parameter should be" - " a pair of sort key and sort dir combined with " - "':', or only sort key specified and sort dir will " - "be default 'asc', the supported sort keys are: " - "('alarm_id', 'enabled', 'name', 'type', 'severity'," - " 'timestamp', 'user_id', 'project_id', 'state', " - "'repeat_actions', 'state_timestamp')", - jsonutils.loads(resp.body)['error_message'] - ['faultstring']) - - def test_pagination_query_only_sort_key_specified(self): - data = self.get_json('/alarms?sort=name', - headers=self.auth_headers) - names = [a['name'] for a in data] - self.assertEqual(['name1', 'name2', 'name3'], names) - - def test_pagination_query_history_data(self): - for i in moves.xrange(10): - self._update_alarm('a', dict(name='%s' % i)) - url = '/alarms/a/history?sort=event_id:desc&sort=timestamp:desc' - data = self.get_json(url, headers=self.auth_headers) - sorted_data = sorted(data, - key=lambda d: (d['event_id'], d['timestamp']), - reverse=True) - self.assertEqual(sorted_data, data) diff --git a/aodh/tests/functional/api/v2/test_app.py b/aodh/tests/functional/api/v2/test_app.py deleted file mode 100644 index 9a78f112..00000000 --- a/aodh/tests/functional/api/v2/test_app.py +++ /dev/null @@ -1,174 +0,0 @@ -# -# Copyright 2013 IBM Corp. -# Copyright 2013 Julien Danjou -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
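The sort and marker tests above exercise keyset pagination against /v2/alarms. Before moving to the next file, a client-side sketch of draining the whole collection page by page; get_json is assumed to behave like the test helper of the same name:

    def iter_alarms(get_json, headers, limit=2):
        # Keyset pagination: request a sorted page, then pass the last
        # alarm_id back as the marker until a page comes back empty.
        marker = None
        while True:
            url = '/alarms?sort=name:asc&limit=%d' % limit
            if marker:
                url += '&marker=%s' % marker
            page = get_json(url, headers=headers)
            if not page:
                return
            for alarm in page:
                yield alarm
            marker = page[-1]['alarm_id']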
-"""Test basic aodh-api app -""" -import json - -import mock -import six -import wsme - -from aodh import i18n -from aodh.tests.functional.api import v2 - - -class TestApiMiddleware(v2.FunctionalTest): - - no_lang_translated_error = 'No lang translated error' - en_US_translated_error = 'en-US translated error' - - def _fake_translate(self, message, user_locale): - if user_locale is None: - return self.no_lang_translated_error - else: - return self.en_US_translated_error - - def test_json_parsable_error_middleware_404(self): - response = self.get_json('/invalid_path', - expect_errors=True, - headers={"Accept": - "application/json"} - ) - self.assertEqual(404, response.status_int) - self.assertEqual("application/json", response.content_type) - self.assertTrue(response.json['error_message']) - response = self.get_json('/invalid_path', - expect_errors=True, - headers={"Accept": - "application/json,application/xml"} - ) - self.assertEqual(404, response.status_int) - self.assertEqual("application/json", response.content_type) - self.assertTrue(response.json['error_message']) - response = self.get_json('/invalid_path', - expect_errors=True, - headers={"Accept": - "application/xml;q=0.8, \ - application/json"} - ) - self.assertEqual(404, response.status_int) - self.assertEqual("application/json", response.content_type) - self.assertTrue(response.json['error_message']) - response = self.get_json('/invalid_path', - expect_errors=True - ) - self.assertEqual(404, response.status_int) - self.assertEqual("application/json", response.content_type) - self.assertTrue(response.json['error_message']) - response = self.get_json('/invalid_path', - expect_errors=True, - headers={"Accept": - "text/html,*/*"} - ) - self.assertEqual(404, response.status_int) - self.assertEqual("application/json", response.content_type) - self.assertTrue(response.json['error_message']) - - def test_json_parsable_error_middleware_translation_400(self): - # Ensure translated messages get placed properly into json faults - with mock.patch.object(i18n, 'translate', - side_effect=self._fake_translate): - response = self.post_json('/alarms', params={'name': 'foobar', - 'type': 'threshold'}, - expect_errors=True, - headers={"Accept": - "application/json"} - ) - self.assertEqual(400, response.status_int) - self.assertEqual("application/json", response.content_type) - self.assertTrue(response.json['error_message']) - self.assertEqual(self.no_lang_translated_error, - response.json['error_message']['faultstring']) - - def test_xml_parsable_error_middleware_404(self): - response = self.get_json('/invalid_path', - expect_errors=True, - headers={"Accept": - "application/xml,*/*"} - ) - self.assertEqual(404, response.status_int) - self.assertEqual("application/xml", response.content_type) - self.assertEqual('error_message', response.xml.tag) - response = self.get_json('/invalid_path', - expect_errors=True, - headers={"Accept": - "application/json;q=0.8 \ - ,application/xml"} - ) - self.assertEqual(404, response.status_int) - self.assertEqual("application/xml", response.content_type) - self.assertEqual('error_message', response.xml.tag) - - def test_xml_parsable_error_middleware_translation_400(self): - # Ensure translated messages get placed properly into xml faults - with mock.patch.object(i18n, 'translate', - side_effect=self._fake_translate): - response = self.post_json('/alarms', params={'name': 'foobar', - 'type': 'threshold'}, - expect_errors=True, - headers={"Accept": - "application/xml,*/*"} - ) - self.assertEqual(400, 
response.status_int)
-            self.assertEqual("application/xml", response.content_type)
-            self.assertEqual('error_message', response.xml.tag)
-            fault = response.xml.findall('./error/faultstring')
-            for fault_string in fault:
-                self.assertEqual(self.no_lang_translated_error, fault_string.text)
-
-    def test_best_match_language(self):
-        # Ensure that we are actually invoking language negotiation
-        with mock.patch.object(i18n, 'translate',
-                               side_effect=self._fake_translate):
-            response = self.post_json('/alarms', params={'name': 'foobar',
-                                                         'type': 'threshold'},
-                                      expect_errors=True,
-                                      headers={"Accept":
-                                               "application/xml,*/*",
-                                               "Accept-Language":
-                                               "en-US"}
-                                      )
-
-        self.assertEqual(400, response.status_int)
-        self.assertEqual("application/xml", response.content_type)
-        self.assertEqual('error_message', response.xml.tag)
-        fault = response.xml.findall('./error/faultstring')
-        for fault_string in fault:
-            self.assertEqual(self.en_US_translated_error, fault_string.text)
-
-    def test_translated_then_untranslated_error(self):
-        resp = self.get_json('/alarms/alarm-id-3', expect_errors=True)
-        self.assertEqual(404, resp.status_code)
-        body = resp.body
-        if six.PY3:
-            body = body.decode('utf-8')
-        self.assertEqual("Alarm alarm-id-3 not found",
-                         json.loads(body)['error_message']
-                         ['faultstring'])
-
-        with mock.patch('aodh.api.controllers.'
-                        'v2.base.AlarmNotFound') as CustomErrorClass:
-            CustomErrorClass.return_value = wsme.exc.ClientSideError(
-                "untranslated_error", status_code=404)
-            resp = self.get_json('/alarms/alarm-id-5', expect_errors=True)
-
-        self.assertEqual(404, resp.status_code)
-        body = resp.body
-        if six.PY3:
-            body = body.decode('utf-8')
-        self.assertEqual("untranslated_error",
-                         json.loads(body)['error_message']
-                         ['faultstring'])
diff --git a/aodh/tests/functional/api/v2/test_capabilities.py b/aodh/tests/functional/api/v2/test_capabilities.py
deleted file mode 100644
index 78c4996e..00000000
--- a/aodh/tests/functional/api/v2/test_capabilities.py
+++ /dev/null
@@ -1,30 +0,0 @@
-#
-# Copyright Ericsson AB 2014. All rights reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-from aodh.tests.functional.api import v2 as tests_api
-
-
-class TestCapabilitiesController(tests_api.FunctionalTest):
-
-    def setUp(self):
-        super(TestCapabilitiesController, self).setUp()
-        self.url = '/capabilities'
-
-    def test_capabilities(self):
-        data = self.get_json(self.url)
-        # check that the data contains both 'api' and 'alarm_storage' fields
-        self.assertIsNotNone(data)
-        self.assertNotEqual({}, data)
-        self.assertIn('api', data)
-        self.assertIn('alarm_storage', data)
diff --git a/aodh/tests/functional/api/v2/test_complex_query.py b/aodh/tests/functional/api/v2/test_complex_query.py
deleted file mode 100644
index 999b183b..00000000
--- a/aodh/tests/functional/api/v2/test_complex_query.py
+++ /dev/null
@@ -1,220 +0,0 @@
-#
-# Copyright Ericsson AB 2013. 
All rights reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Test the methods related to complex query.""" -import datetime - -import fixtures -import jsonschema -import mock -from oslotest import base -import wsme - -from aodh.api.controllers.v2 import query -from aodh.storage import models as alarm_models - - -class FakeComplexQuery(query.ValidatedComplexQuery): - def __init__(self, db_model, additional_name_mapping=None, metadata=False): - super(FakeComplexQuery, self).__init__(query=None, - db_model=db_model, - additional_name_mapping=( - additional_name_mapping or - {}), - metadata_allowed=metadata) - - -class TestComplexQuery(base.BaseTestCase): - def setUp(self): - super(TestComplexQuery, self).setUp() - self.useFixture(fixtures.MonkeyPatch( - 'pecan.response', mock.MagicMock())) - self.query = FakeComplexQuery(alarm_models.Alarm) - self.query_alarmchange = FakeComplexQuery( - alarm_models.AlarmChange) - - def test_replace_isotime_utc(self): - filter_expr = {"=": {"timestamp": "2013-12-05T19:38:29Z"}} - self.query._replace_isotime_with_datetime(filter_expr) - self.assertEqual(datetime.datetime(2013, 12, 5, 19, 38, 29), - filter_expr["="]["timestamp"]) - - def test_replace_isotime_timezone_removed(self): - filter_expr = {"=": {"timestamp": "2013-12-05T20:38:29+01:00"}} - self.query._replace_isotime_with_datetime(filter_expr) - self.assertEqual(datetime.datetime(2013, 12, 5, 20, 38, 29), - filter_expr["="]["timestamp"]) - - def test_replace_isotime_wrong_syntax(self): - filter_expr = {"=": {"timestamp": "not a valid isotime string"}} - self.assertRaises(wsme.exc.ClientSideError, - self.query._replace_isotime_with_datetime, - filter_expr) - - def test_replace_isotime_in_complex_filter(self): - filter_expr = {"and": [{"=": {"timestamp": "2013-12-05T19:38:29Z"}}, - {"=": {"timestamp": "2013-12-06T19:38:29Z"}}]} - self.query._replace_isotime_with_datetime(filter_expr) - self.assertEqual(datetime.datetime(2013, 12, 5, 19, 38, 29), - filter_expr["and"][0]["="]["timestamp"]) - self.assertEqual(datetime.datetime(2013, 12, 6, 19, 38, 29), - filter_expr["and"][1]["="]["timestamp"]) - - def test_replace_isotime_in_complex_filter_with_unbalanced_tree(self): - subfilter = {"and": [{"=": {"project_id": 42}}, - {"=": {"timestamp": "2013-12-06T19:38:29Z"}}]} - - filter_expr = {"or": [{"=": {"timestamp": "2013-12-05T19:38:29Z"}}, - subfilter]} - - self.query._replace_isotime_with_datetime(filter_expr) - self.assertEqual(datetime.datetime(2013, 12, 5, 19, 38, 29), - filter_expr["or"][0]["="]["timestamp"]) - self.assertEqual(datetime.datetime(2013, 12, 6, 19, 38, 29), - filter_expr["or"][1]["and"][1]["="]["timestamp"]) - - def test_convert_operator_to_lower_case(self): - filter_expr = {"AND": [{"=": {"project_id": 42}}, - {"=": {"project_id": 44}}]} - self.query._convert_operator_to_lower_case(filter_expr) - self.assertEqual("and", list(filter_expr.keys())[0]) - - filter_expr = {"Or": [{"=": {"project_id": 43}}, - {"anD": [{"=": {"project_id": 44}}, - {"=": {"project_id": 42}}]}]} - 
self.query._convert_operator_to_lower_case(filter_expr) - self.assertEqual("or", list(filter_expr.keys())[0]) - self.assertEqual("and", list(filter_expr["or"][1].keys())[0]) - - def test_invalid_filter_misstyped_field_name_samples(self): - filter = {"=": {"project_id11": 42}} - self.assertRaises(jsonschema.ValidationError, - self.query._validate_filter, - filter) - - def test_invalid_filter_misstyped_field_name_alarms(self): - filter = {"=": {"enabbled": True}} - self.assertRaises(jsonschema.ValidationError, - self.query._validate_filter, - filter) - - def test_invalid_filter_misstyped_field_name_alarmchange(self): - filter = {"=": {"tpe": "rule change"}} - self.assertRaises(jsonschema.ValidationError, - self.query_alarmchange._validate_filter, - filter) - - def test_invalid_complex_filter_wrong_field_names(self): - filter = {"and": - [{"=": {"non_existing_field": 42}}, - {"=": {"project_id": 42}}]} - self.assertRaises(jsonschema.ValidationError, - self.query._validate_filter, - filter) - - filter = {"and": - [{"=": {"project_id": 42}}, - {"=": {"non_existing_field": 42}}]} - self.assertRaises(jsonschema.ValidationError, - self.query._validate_filter, - filter) - - filter = {"and": - [{"=": {"project_id11": 42}}, - {"=": {"project_id": 42}}]} - self.assertRaises(jsonschema.ValidationError, - self.query_alarmchange._validate_filter, - filter) - - filter = {"or": - [{"=": {"non_existing_field": 42}}, - {"and": - [{"=": {"project_id": 44}}, - {"=": {"project_id": 42}}]}]} - self.assertRaises(jsonschema.ValidationError, - self.query._validate_filter, - filter) - - filter = {"or": - [{"=": {"project_id": 43}}, - {"and": - [{"=": {"project_id": 44}}, - {"=": {"non_existing_field": 42}}]}]} - self.assertRaises(jsonschema.ValidationError, - self.query._validate_filter, - filter) - - def test_convert_orderby(self): - orderby = [] - self.query._convert_orderby_to_lower_case(orderby) - self.assertEqual([], orderby) - - orderby = [{"project_id": "DESC"}] - self.query._convert_orderby_to_lower_case(orderby) - self.assertEqual([{"project_id": "desc"}], orderby) - - orderby = [{"project_id": "ASC"}, {"resource_id": "DESC"}] - self.query._convert_orderby_to_lower_case(orderby) - self.assertEqual([{"project_id": "asc"}, {"resource_id": "desc"}], - orderby) - - def test_validate_orderby_empty_direction(self): - orderby = [{"project_id": ""}] - self.assertRaises(jsonschema.ValidationError, - self.query._validate_orderby, - orderby) - orderby = [{"project_id": "asc"}, {"resource_id": ""}] - self.assertRaises(jsonschema.ValidationError, - self.query._validate_orderby, - orderby) - - def test_validate_orderby_wrong_order_string(self): - orderby = [{"project_id": "not a valid order"}] - self.assertRaises(jsonschema.ValidationError, - self.query._validate_orderby, - orderby) - - def test_validate_orderby_wrong_multiple_item_order_string(self): - orderby = [{"project_id": "not a valid order"}, {"resource_id": "ASC"}] - self.assertRaises(jsonschema.ValidationError, - self.query._validate_orderby, - orderby) - - def test_validate_orderby_empty_field_name(self): - orderby = [{"": "ASC"}] - self.assertRaises(jsonschema.ValidationError, - self.query._validate_orderby, - orderby) - orderby = [{"project_id": "asc"}, {"": "desc"}] - self.assertRaises(jsonschema.ValidationError, - self.query._validate_orderby, - orderby) - - def test_validate_orderby_wrong_field_name(self): - orderby = [{"project_id11": "ASC"}] - self.assertRaises(jsonschema.ValidationError, - self.query._validate_orderby, - orderby) - - def 
test_validate_orderby_wrong_field_name_multiple_item_orderby(self): - orderby = [{"project_id": "asc"}, {"resource_id11": "ASC"}] - self.assertRaises(jsonschema.ValidationError, - self.query._validate_orderby, - orderby) - - def test_validate_orderby_metadata_is_not_allowed(self): - orderby = [{"metadata.display_name": "asc"}] - self.assertRaises(jsonschema.ValidationError, - self.query._validate_orderby, - orderby) diff --git a/aodh/tests/functional/api/v2/test_complex_query_scenarios.py b/aodh/tests/functional/api/v2/test_complex_query_scenarios.py deleted file mode 100644 index 0cb8ed98..00000000 --- a/aodh/tests/functional/api/v2/test_complex_query_scenarios.py +++ /dev/null @@ -1,352 +0,0 @@ -# -# Copyright Ericsson AB 2013. All rights reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Tests complex queries for alarms -""" - -import datetime - -from oslo_utils import timeutils - -from aodh.storage import models -from aodh.tests.functional.api import v2 as tests_api - - -admin_header = {"X-Roles": "admin", - "X-Project-Id": - "project-id1"} -non_admin_header = {"X-Roles": "Member", - "X-Project-Id": - "project-id1"} - - -class TestQueryAlarmsController(tests_api.FunctionalTest): - - def setUp(self): - super(TestQueryAlarmsController, self).setUp() - self.alarm_url = '/query/alarms' - - for state in ['ok', 'alarm', 'insufficient data']: - for date in [datetime.datetime(2013, 1, 1), - datetime.datetime(2013, 2, 2)]: - for id in [1, 2]: - alarm_id = "-".join([state, date.isoformat(), str(id)]) - project_id = "project-id%d" % id - alarm = models.Alarm(name=alarm_id, - type='threshold', - enabled=True, - alarm_id=alarm_id, - description='a', - state=state, - state_reason="state_reason", - state_timestamp=date, - timestamp=date, - ok_actions=[], - insufficient_data_actions=[], - alarm_actions=[], - repeat_actions=True, - user_id="user-id%d" % id, - project_id=project_id, - time_constraints=[], - rule=dict(comparison_operator='gt', - threshold=2.0, - statistic='avg', - evaluation_periods=60, - period=1, - meter_name='meter.test', - query=[{'field': - 'project_id', - 'op': 'eq', - 'value': - project_id}]), - severity='critical') - self.alarm_conn.create_alarm(alarm) - - def test_query_all(self): - data = self.post_json(self.alarm_url, - headers=admin_header, - params={}) - - self.assertEqual(12, len(data.json)) - - def test_filter_with_isotime_timestamp(self): - date_time = datetime.datetime(2013, 1, 1) - isotime = date_time.isoformat() - - data = self.post_json(self.alarm_url, - headers=admin_header, - params={"filter": - '{">": {"timestamp": "' - + isotime + '"}}'}) - - self.assertEqual(6, len(data.json)) - for alarm in data.json: - result_time = timeutils.parse_isotime(alarm['timestamp']) - result_time = result_time.replace(tzinfo=None) - self.assertGreater(result_time, date_time) - - def test_filter_with_isotime_state_timestamp(self): - date_time = datetime.datetime(2013, 1, 1) - isotime = date_time.isoformat() - - data = self.post_json(self.alarm_url, - headers=admin_header, - 
params={"filter": - '{">": {"state_timestamp": "' - + isotime + '"}}'}) - - self.assertEqual(6, len(data.json)) - for alarm in data.json: - result_time = timeutils.parse_isotime(alarm['state_timestamp']) - result_time = result_time.replace(tzinfo=None) - self.assertGreater(result_time, date_time) - - def test_non_admin_tenant_sees_only_its_own_project(self): - data = self.post_json(self.alarm_url, - params={}, - headers=non_admin_header) - for alarm in data.json: - self.assertEqual("project-id1", alarm['project_id']) - - def test_non_admin_tenant_cannot_query_others_project(self): - data = self.post_json(self.alarm_url, - params={"filter": - '{"=": {"project_id": "project-id2"}}'}, - expect_errors=True, - headers=non_admin_header) - - self.assertEqual(401, data.status_int) - self.assertIn(b"Not Authorized to access project project-id2", - data.body) - - def test_non_admin_tenant_can_explicitly_filter_for_own_project(self): - data = self.post_json(self.alarm_url, - params={"filter": - '{"=": {"project_id": "project-id1"}}'}, - headers=non_admin_header) - - for alarm in data.json: - self.assertEqual("project-id1", alarm['project_id']) - - def test_admin_tenant_sees_every_project(self): - data = self.post_json(self.alarm_url, - params={}, - headers=admin_header) - - self.assertEqual(12, len(data.json)) - for alarm in data.json: - self.assertIn(alarm['project_id'], - (["project-id1", "project-id2"])) - - def test_admin_tenant_can_query_any_project(self): - data = self.post_json(self.alarm_url, - params={"filter": - '{"=": {"project_id": "project-id2"}}'}, - headers=admin_header) - - self.assertEqual(6, len(data.json)) - for alarm in data.json: - self.assertIn(alarm['project_id'], set(["project-id2"])) - - def test_query_with_field_project(self): - data = self.post_json(self.alarm_url, - headers=admin_header, - params={"filter": - '{"=": {"project": "project-id2"}}'}) - - self.assertEqual(6, len(data.json)) - for alarm_item in data.json: - self.assertIn(alarm_item['project_id'], set(["project-id2"])) - - def test_query_with_field_user_in_orderby(self): - data = self.post_json(self.alarm_url, - headers=admin_header, - params={"filter": '{"=": {"state": "alarm"}}', - "orderby": '[{"user": "DESC"}]'}) - - self.assertEqual(4, len(data.json)) - self.assertEqual(["user-id2", "user-id2", "user-id1", "user-id1"], - [s["user_id"] for s in data.json]) - - def test_query_with_filter_orderby_and_limit(self): - orderby = '[{"state_timestamp": "DESC"}]' - data = self.post_json(self.alarm_url, - headers=admin_header, - params={"filter": '{"=": {"state": "alarm"}}', - "orderby": orderby, - "limit": 3}) - - self.assertEqual(3, len(data.json)) - self.assertEqual(["2013-02-02T00:00:00", - "2013-02-02T00:00:00", - "2013-01-01T00:00:00"], - [a["state_timestamp"] for a in data.json]) - for alarm in data.json: - self.assertEqual("alarm", alarm["state"]) - - def test_query_with_orderby_severity(self): - if self.engine != "mysql": - self.skipTest("This is only implemented for MySQL") - orderby = '[{"severity": "ASC"}]' - data = self.post_json(self.alarm_url, - headers=admin_header, - params={"orderby": orderby}) - alarms = list(data.json) - severities = [a['severity'] for a in alarms] - severity_choices = ['low', 'moderate', 'critical'] - sorted_severities = sorted(severities, key=severity_choices.index) - self.assertEqual(sorted_severities, severities) - - orderby = '[{"severity": "DESC"}]' - data = self.post_json(self.alarm_url, - headers=admin_header, - params={"orderby": orderby}) - alarms = list(data.json) - 
severities = [a['severity'] for a in alarms] - sorted_severities = sorted(severities, key=severity_choices.index, - reverse=True) - self.assertEqual(sorted_severities, severities) - - def test_limit_should_be_positive(self): - data = self.post_json(self.alarm_url, - headers=admin_header, - params={"limit": 0}, - expect_errors=True) - - self.assertEqual(400, data.status_int) - self.assertIn(b"Limit should be positive", data.body) - - -class TestQueryAlarmsHistoryController(tests_api.FunctionalTest): - - def setUp(self): - super(TestQueryAlarmsHistoryController, self).setUp() - self.url = '/query/alarms/history' - for id in [1, 2]: - for type in ["creation", "state transition"]: - for date in [datetime.datetime(2013, 1, 1), - datetime.datetime(2013, 2, 2)]: - event_id = "-".join([str(id), type, date.isoformat()]) - alarm_change = {"event_id": event_id, - "alarm_id": "alarm-id%d" % id, - "type": type, - "detail": "", - "user_id": "user-id%d" % id, - "project_id": "project-id%d" % id, - "on_behalf_of": "project-id%d" % id, - "timestamp": date} - - self.alarm_conn.record_alarm_change(alarm_change) - - def test_query_all(self): - data = self.post_json(self.url, - headers=admin_header, - params={}) - - self.assertEqual(8, len(data.json)) - - def test_filter_with_isotime(self): - date_time = datetime.datetime(2013, 1, 1) - isotime = date_time.isoformat() - - data = self.post_json(self.url, - headers=admin_header, - params={"filter": - '{">": {"timestamp":"' - + isotime + '"}}'}) - - self.assertEqual(4, len(data.json)) - for history in data.json: - result_time = timeutils.parse_isotime(history['timestamp']) - result_time = result_time.replace(tzinfo=None) - self.assertGreater(result_time, date_time) - - def test_non_admin_tenant_sees_only_its_own_project(self): - data = self.post_json(self.url, - params={}, - headers=non_admin_header) - for history in data.json: - self.assertEqual("project-id1", history['on_behalf_of']) - - def test_non_admin_tenant_cannot_query_others_project(self): - data = self.post_json(self.url, - params={"filter": - '{"=": {"on_behalf_of":' - + ' "project-id2"}}'}, - expect_errors=True, - headers=non_admin_header) - - self.assertEqual(401, data.status_int) - self.assertIn(b"Not Authorized to access project project-id2", - data.body) - - def test_non_admin_tenant_can_explicitly_filter_for_own_project(self): - data = self.post_json(self.url, - params={"filter": - '{"=": {"on_behalf_of":' - + ' "project-id1"}}'}, - headers=non_admin_header) - - for history in data.json: - self.assertEqual("project-id1", history['on_behalf_of']) - - def test_admin_tenant_sees_every_project(self): - data = self.post_json(self.url, - params={}, - headers=admin_header) - - self.assertEqual(8, len(data.json)) - for history in data.json: - self.assertIn(history['on_behalf_of'], - (["project-id1", "project-id2"])) - - def test_query_with_filter_for_project_orderby_with_user(self): - data = self.post_json(self.url, - headers=admin_header, - params={"filter": - '{"=": {"project": "project-id1"}}', - "orderby": '[{"user": "DESC"}]', - "limit": 3}) - - self.assertEqual(3, len(data.json)) - self.assertEqual(["user-id1", - "user-id1", - "user-id1"], - [h["user_id"] for h in data.json]) - for history in data.json: - self.assertEqual("project-id1", history['project_id']) - - def test_query_with_filter_orderby_and_limit(self): - data = self.post_json(self.url, - headers=admin_header, - params={"filter": '{"=": {"type": "creation"}}', - "orderby": '[{"timestamp": "DESC"}]', - "limit": 3}) - - self.assertEqual(3, 
len(data.json)) - self.assertEqual(["2013-02-02T00:00:00", - "2013-02-02T00:00:00", - "2013-01-01T00:00:00"], - [h["timestamp"] for h in data.json]) - for history in data.json: - self.assertEqual("creation", history['type']) - - def test_limit_should_be_positive(self): - data = self.post_json(self.url, - params={"limit": 0}, - headers=admin_header, - expect_errors=True) - - self.assertEqual(400, data.status_int) - self.assertIn(b"Limit should be positive", data.body) diff --git a/aodh/tests/functional/api/v2/test_query.py b/aodh/tests/functional/api/v2/test_query.py deleted file mode 100644 index 0e244f4f..00000000 --- a/aodh/tests/functional/api/v2/test_query.py +++ /dev/null @@ -1,332 +0,0 @@ -# Copyright 2013 OpenStack Foundation. -# All Rights Reserved. -# Copyright 2013 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Test the methods related to query.""" -import datetime - -import fixtures -import mock -from oslo_utils import timeutils -from oslotest import base -import wsme - -from aodh.api.controllers.v2 import base as v2_base -from aodh.api.controllers.v2 import utils -from aodh import storage -from aodh.storage import base as alarm_storage_base -from aodh.tests import base as tests_base - - -class TestQuery(base.BaseTestCase): - def setUp(self): - super(TestQuery, self).setUp() - self.useFixture(fixtures.MonkeyPatch( - 'pecan.response', mock.MagicMock())) - - def test_get_value_as_type_with_integer(self): - query = v2_base.Query(field='metadata.size', - op='eq', - value='123', - type='integer') - expected = 123 - self.assertEqual(expected, query._get_value_as_type()) - - def test_get_value_as_type_with_float(self): - query = v2_base.Query(field='metadata.size', - op='eq', - value='123.456', - type='float') - expected = 123.456 - self.assertEqual(expected, query._get_value_as_type()) - - def test_get_value_as_type_with_boolean(self): - query = v2_base.Query(field='metadata.is_public', - op='eq', - value='True', - type='boolean') - expected = True - self.assertEqual(expected, query._get_value_as_type()) - - def test_get_value_as_type_with_string(self): - query = v2_base.Query(field='metadata.name', - op='eq', - value='linux', - type='string') - expected = 'linux' - self.assertEqual(expected, query._get_value_as_type()) - - def test_get_value_as_type_with_datetime(self): - query = v2_base.Query(field='metadata.date', - op='eq', - value='2014-01-01T05:00:00', - type='datetime') - self.assertIsInstance(query._get_value_as_type(), datetime.datetime) - self.assertIsNone(query._get_value_as_type().tzinfo) - - def test_get_value_as_type_with_integer_without_type(self): - query = v2_base.Query(field='metadata.size', - op='eq', - value='123') - expected = 123 - self.assertEqual(expected, query._get_value_as_type()) - - def test_get_value_as_type_with_float_without_type(self): - query = v2_base.Query(field='metadata.size', - op='eq', - value='123.456') - expected = 123.456 - self.assertEqual(expected, query._get_value_as_type()) - - def 
test_get_value_as_type_with_boolean_without_type(self): - query = v2_base.Query(field='metadata.is_public', - op='eq', - value='True') - expected = True - self.assertEqual(expected, query._get_value_as_type()) - - def test_get_value_as_type_with_string_without_type(self): - query = v2_base.Query(field='metadata.name', - op='eq', - value='linux') - expected = 'linux' - self.assertEqual(expected, query._get_value_as_type()) - - def test_get_value_as_type_with_bad_type(self): - query = v2_base.Query(field='metadata.size', - op='eq', - value='123.456', - type='blob') - self.assertRaises(wsme.exc.ClientSideError, query._get_value_as_type) - - def test_get_value_as_type_with_bad_value(self): - query = v2_base.Query(field='metadata.size', - op='eq', - value='fake', - type='integer') - self.assertRaises(wsme.exc.ClientSideError, query._get_value_as_type) - - def test_get_value_as_type_integer_expression_without_type(self): - # bug 1221736 - query = v2_base.Query(field='should_be_a_string', - op='eq', - value='WWW-Layer-4a80714f') - expected = 'WWW-Layer-4a80714f' - self.assertEqual(expected, query._get_value_as_type()) - - def test_get_value_as_type_boolean_expression_without_type(self): - # bug 1221736 - query = v2_base.Query(field='should_be_a_string', - op='eq', - value='True or False') - expected = 'True or False' - self.assertEqual(expected, query._get_value_as_type()) - - def test_get_value_as_type_with_syntax_error(self): - # bug 1221736 - value = 'WWW-Layer-4a80714f-0232-4580-aa5e-81494d1a4147-uolhh25p5xxm' - query = v2_base.Query(field='group_id', - op='eq', - value=value) - expected = value - self.assertEqual(expected, query._get_value_as_type()) - - def test_get_value_as_type_with_syntax_error_colons(self): - # bug 1221736 - value = 'Ref::StackId' - query = v2_base.Query(field='field_name', - op='eq', - value=value) - expected = value - self.assertEqual(expected, query._get_value_as_type()) - - -class TestQueryToKwArgs(tests_base.BaseTestCase): - def setUp(self): - super(TestQueryToKwArgs, self).setUp() - self.useFixture(fixtures.MockPatchObject( - utils, 'sanitize_query', side_effect=lambda x, y, **z: x)) - self.useFixture(fixtures.MockPatchObject( - utils, '_verify_query_segregation', side_effect=lambda x, **z: x)) - - def test_sample_filter_single(self): - q = [v2_base.Query(field='user_id', - op='eq', - value='uid')] - kwargs = utils.query_to_kwargs(q, storage.SampleFilter.__init__) - self.assertIn('user', kwargs) - self.assertEqual(1, len(kwargs)) - self.assertEqual('uid', kwargs['user']) - - def test_sample_filter_multi(self): - q = [v2_base.Query(field='user_id', - op='eq', - value='uid'), - v2_base.Query(field='project_id', - op='eq', - value='pid'), - v2_base.Query(field='resource_id', - op='eq', - value='rid'), - v2_base.Query(field='source', - op='eq', - value='source_name'), - v2_base.Query(field='meter', - op='eq', - value='meter_name')] - kwargs = utils.query_to_kwargs(q, storage.SampleFilter.__init__) - self.assertEqual(5, len(kwargs)) - self.assertEqual('uid', kwargs['user']) - self.assertEqual('pid', kwargs['project']) - self.assertEqual('rid', kwargs['resource']) - self.assertEqual('source_name', kwargs['source']) - self.assertEqual('meter_name', kwargs['meter']) - - def test_sample_filter_timestamp(self): - ts_start = timeutils.utcnow() - ts_end = ts_start + datetime.timedelta(minutes=5) - q = [v2_base.Query(field='timestamp', - op='lt', - value=str(ts_end)), - v2_base.Query(field='timestamp', - op='gt', - value=str(ts_start))] - kwargs = utils.query_to_kwargs(q, 
storage.SampleFilter.__init__) - self.assertEqual(4, len(kwargs)) - self.assertTimestampEqual(kwargs['start_timestamp'], ts_start) - self.assertTimestampEqual(kwargs['end_timestamp'], ts_end) - self.assertEqual('gt', kwargs['start_timestamp_op']) - self.assertEqual('lt', kwargs['end_timestamp_op']) - - def test_sample_filter_non_equality_on_metadata(self): - queries = [v2_base.Query(field='resource_metadata.image_id', - op='gt', - value='image', - type='string'), - v2_base.Query(field='metadata.ramdisk_id', - op='le', - value='ramdisk', - type='string')] - with mock.patch('pecan.request') as request: - request.headers.return_value = {'X-ProjectId': 'foobar'} - self.assertRaises( - wsme.exc.InvalidInput, - utils.query_to_kwargs, - queries, - storage.SampleFilter.__init__) - - def test_sample_filter_invalid_field(self): - q = [v2_base.Query(field='invalid', - op='eq', - value='20')] - self.assertRaises( - wsme.exc.UnknownArgument, - utils.query_to_kwargs, q, storage.SampleFilter.__init__) - - def test_sample_filter_invalid_op(self): - q = [v2_base.Query(field='user_id', - op='lt', - value='20')] - self.assertRaises( - wsme.exc.InvalidInput, - utils.query_to_kwargs, q, storage.SampleFilter.__init__) - - def test_sample_filter_timestamp_invalid_op(self): - ts_start = timeutils.utcnow() - q = [v2_base.Query(field='timestamp', - op='eq', - value=str(ts_start))] - self.assertRaises( - wsme.exc.InvalidInput, - utils.query_to_kwargs, q, storage.SampleFilter.__init__) - - def test_sample_filter_exclude_internal(self): - queries = [v2_base.Query(field=f, - op='eq', - value='fake', - type='string') - for f in ['y', 'on_behalf_of', 'x']] - with mock.patch('pecan.request') as request: - request.headers.return_value = {'X-ProjectId': 'foobar'} - self.assertRaises(wsme.exc.ClientSideError, - utils.query_to_kwargs, - queries, - storage.SampleFilter.__init__, - internal_keys=['on_behalf_of']) - - def test_sample_filter_self_always_excluded(self): - queries = [v2_base.Query(field='user_id', - op='eq', - value='20')] - with mock.patch('pecan.request') as request: - request.headers.return_value = {'X-ProjectId': 'foobar'} - kwargs = utils.query_to_kwargs(queries, - storage.SampleFilter.__init__) - self.assertNotIn('self', kwargs) - - def test_sample_filter_translation(self): - queries = [v2_base.Query(field=f, - op='eq', - value='fake_%s' % f, - type='string') for f in ['user_id', - 'project_id', - 'resource_id']] - with mock.patch('pecan.request') as request: - request.headers.return_value = {'X-ProjectId': 'foobar'} - kwargs = utils.query_to_kwargs(queries, - storage.SampleFilter.__init__) - for o in ['user', 'project', 'resource']: - self.assertEqual('fake_%s_id' % o, kwargs.get(o)) - - def test_timestamp_validation(self): - q = [v2_base.Query(field='timestamp', - op='le', - value='123')] - - exc = self.assertRaises( - wsme.exc.InvalidInput, - utils.query_to_kwargs, q, storage.SampleFilter.__init__) - expected_exc = wsme.exc.InvalidInput('timestamp', '123', - 'invalid timestamp format') - self.assertEqual(str(expected_exc), str(exc)) - - def test_get_alarm_changes_filter_valid_fields(self): - q = [v2_base.Query(field='abc', - op='eq', - value='abc')] - exc = self.assertRaises( - wsme.exc.UnknownArgument, - utils.query_to_kwargs, q, - alarm_storage_base.Connection.get_alarm_changes) - valid_keys = ['alarm_id', 'on_behalf_of', 'project', 'search_offset', - 'severity', 'timestamp', 'type', 'user'] - msg = ("unrecognized field in query: %s, " - "valid keys: %s") % (q, valid_keys) - expected_exc = 
wsme.exc.UnknownArgument('abc', msg) - self.assertEqual(str(expected_exc), str(exc)) - - def test_get_alarms_filter_valid_fields(self): - q = [v2_base.Query(field='abc', - op='eq', - value='abc')] - exc = self.assertRaises( - wsme.exc.UnknownArgument, - utils.query_to_kwargs, q, - alarm_storage_base.Connection.get_alarms) - valid_keys = ['alarm_id', 'enabled', 'exclude', 'meter', 'name', - 'project', 'severity', 'state', 'type', 'user'] - msg = ("unrecognized field in query: %s, " - "valid keys: %s") % (q, valid_keys) - expected_exc = wsme.exc.UnknownArgument('abc', msg) - self.assertEqual(str(expected_exc), str(exc)) diff --git a/aodh/tests/functional/api/v2/test_wsme_custom_type.py b/aodh/tests/functional/api/v2/test_wsme_custom_type.py deleted file mode 100644 index 5e5fe4c2..00000000 --- a/aodh/tests/functional/api/v2/test_wsme_custom_type.py +++ /dev/null @@ -1,33 +0,0 @@ -# -# Copyright 2013 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from oslotest import base -import wsme - -from aodh.api.controllers.v2 import base as v2_base - - -class TestWsmeCustomType(base.BaseTestCase): - - def test_advenum_default(self): - class dummybase(wsme.types.Base): - ae = v2_base.AdvEnum("name", str, "one", "other", default="other") - - obj = dummybase() - self.assertEqual("other", obj.ae) - - obj = dummybase(ae="one") - self.assertEqual("one", obj.ae) - - self.assertRaises(wsme.exc.InvalidInput, dummybase, ae="not exists") diff --git a/aodh/tests/functional/db.py b/aodh/tests/functional/db.py deleted file mode 100644 index 29b309e6..00000000 --- a/aodh/tests/functional/db.py +++ /dev/null @@ -1,121 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# Copyright 2013 eNovance -# Copyright 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
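The AdvEnum test just above relies on wsme rejecting values outside a declared set and falling back to a default. A plain-Python descriptor sketch of the same validated-enum idea (illustrative only, not wsme's implementation):

    class EnumAttr(object):
        # Accepts only one of a fixed set of values, with an optional
        # default -- the behaviour dummybase.ae exercises above.
        def __init__(self, name, *values, **kwargs):
            self.name = name
            self.values = values
            self.default = kwargs.get('default')

        def __set_name__(self, owner, attr):
            self.attr = '_' + attr

        def __get__(self, obj, objtype=None):
            if obj is None:
                return self
            return getattr(obj, self.attr, self.default)

        def __set__(self, obj, value):
            if value not in self.values:
                raise ValueError('%s must be one of %r' % (self.name,
                                                           self.values))
            setattr(obj, self.attr, value)

Usage mirrors the test: a class attribute ae = EnumAttr('name', 'one', 'other', default='other') reads back 'other' until a valid value is assigned, and raises on anything outside the declared set.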
-
-"""Base classes for API tests."""
-import os
-
-import fixtures
-from oslo_config import fixture as fixture_config
-from oslo_utils import uuidutils
-import six
-from six.moves.urllib import parse as urlparse
-
-from aodh import service
-from aodh import storage
-from aodh.tests import base as test_base
-
-
-class SQLManager(fixtures.Fixture):
-    def __init__(self, conf):
-        self.conf = conf
-        db_name = 'aodh_%s' % uuidutils.generate_uuid(dashed=False)
-        import sqlalchemy
-        self._engine = sqlalchemy.create_engine(conf.database.connection)
-        self._conn = self._engine.connect()
-        self._create_db(self._conn, db_name)
-        self._conn.close()
-        self._engine.dispose()
-        parsed = list(urlparse.urlparse(conf.database.connection))
-        # NOTE(jd) We need to set a host, otherwise urlunparse() will not
-        # construct a proper URL
-        if parsed[1] == '':
-            parsed[1] = 'localhost'
-        parsed[2] = '/' + db_name
-        self.url = urlparse.urlunparse(parsed)
-
-
-class PgSQLManager(SQLManager):
-
-    @staticmethod
-    def _create_db(conn, db_name):
-        conn.connection.set_isolation_level(0)
-        conn.execute('CREATE DATABASE %s WITH TEMPLATE template0;' % db_name)
-        conn.connection.set_isolation_level(1)
-
-
-class MySQLManager(SQLManager):
-
-    @staticmethod
-    def _create_db(conn, db_name):
-        conn.execute('CREATE DATABASE %s;' % db_name)
-
-
-class SQLiteManager(fixtures.Fixture):
-
-    def __init__(self, conf):
-        self.url = "sqlite://"
-
-
-@six.add_metaclass(test_base.SkipNotImplementedMeta)
-class TestBase(test_base.BaseTestCase):
-
-    DRIVER_MANAGERS = {
-        'mysql': MySQLManager,
-        'postgresql': PgSQLManager,
-        'sqlite': SQLiteManager,
-    }
-
-    def setUp(self):
-        super(TestBase, self).setUp()
-        db_url = os.environ.get(
-            'AODH_TEST_STORAGE_URL',
-            'sqlite://').replace(
-                "mysql://", "mysql+pymysql://")
-        engine = urlparse.urlparse(db_url).scheme
-        # Some drivers carry an additional specification; for example,
-        # PyMySQL uses the scheme mysql+pymysql.
-        self.engine = engine.split('+')[0]
-
-        conf = service.prepare_service(argv=[], config_files=[])
-        self.CONF = self.useFixture(fixture_config.Config(conf)).conf
-        self.CONF.set_override('connection', db_url, group="database")
-
-        manager = self.DRIVER_MANAGERS.get(self.engine)
-        if not manager:
-            self.skipTest("missing driver manager: %s" % self.engine)
-
-        self.db_manager = manager(self.CONF)
-
-        self.useFixture(self.db_manager)
-
-        self.CONF.set_override('connection', self.db_manager.url,
-                               group="database")
-
-        self.alarm_conn = storage.get_connection_from_config(self.CONF)
-        self.alarm_conn.upgrade()
-
-        self.useFixture(fixtures.MockPatch(
-            'aodh.storage.get_connection_from_config',
-            side_effect=self._get_connection))
-
-    def tearDown(self):
-        self.alarm_conn.clear()
-        self.alarm_conn = None
-        super(TestBase, self).tearDown()
-
-    def _get_connection(self, conf):
-        return self.alarm_conn
diff --git a/aodh/tests/functional/gabbi/__init__.py b/aodh/tests/functional/gabbi/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/aodh/tests/functional/gabbi/fixtures.py b/aodh/tests/functional/gabbi/fixtures.py
deleted file mode 100644
index c9f01030..00000000
--- a/aodh/tests/functional/gabbi/fixtures.py
+++ /dev/null
@@ -1,131 +0,0 @@
-#
-# Copyright 2015 Red Hat. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Fixtures used during Gabbi-based test runs.""" - -import os - -from gabbi import fixture -import mock -from oslo_config import cfg -from oslo_config import fixture as fixture_config -from oslo_policy import opts -from oslo_utils import uuidutils -from six.moves.urllib import parse as urlparse -import sqlalchemy_utils - -from aodh.api import app -from aodh import service -from aodh import storage - - -# NOTE(chdent): Hack to restore semblance of global configuration to -# pass to the WSGI app used per test suite. LOAD_APP_KWARGS are the oslo -# configuration and the pecan application configuration, of which the -# critical part is a reference to the current indexer. -LOAD_APP_KWARGS = None - - -def setup_app(): - global LOAD_APP_KWARGS - return app.load_app(**LOAD_APP_KWARGS) - - -class ConfigFixture(fixture.GabbiFixture): - """Establish the relevant configuration for a test run.""" - - def start_fixture(self): - """Set up config.""" - - global LOAD_APP_KWARGS - - self.conf = None - self.conn = None - - # Determine the database connection. - db_url = os.environ.get( - 'AODH_TEST_STORAGE_URL', "").replace( - "mysql://", "mysql+pymysql://") - if not db_url: - self.fail('No database connection configured') - - conf = service.prepare_service([], config_files=[]) - # NOTE(jd): prepare_service() is called twice: first by load_app() for - # Pecan, then Pecan calls pastedeploy, which starts the app, which has - # no way to pass the conf object, so the Paste app calls - # prepare_service again. In real life, that's not a problem, but here we want - # to be sure that the second time the same conf object is returned - # since we tweaked it. To ensure that, once we have called prepare_service() we - # mock it so it returns the same conf object.
- self.prepare_service = service.prepare_service - service.prepare_service = mock.Mock() - service.prepare_service.return_value = conf - conf = fixture_config.Config(conf).conf - self.conf = conf - opts.set_defaults(self.conf) - - conf.set_override('policy_file', - os.path.abspath( - 'aodh/tests/open-policy.json'), - group='oslo_policy') - conf.set_override('auth_mode', None, group='api') - - parsed_url = urlparse.urlparse(db_url) - if parsed_url.scheme != 'sqlite': - parsed_url = list(parsed_url) - parsed_url[2] += '-%s' % uuidutils.generate_uuid(dashed=False) - db_url = urlparse.urlunparse(parsed_url) - - conf.set_override('connection', db_url, group='database') - - if (parsed_url[0].startswith("mysql") - or parsed_url[0].startswith("postgresql")): - sqlalchemy_utils.create_database(conf.database.connection) - - self.conn = storage.get_connection_from_config(self.conf) - self.conn.upgrade() - - LOAD_APP_KWARGS = { - 'conf': conf, - } - - def stop_fixture(self): - """Reset the config and remove data.""" - if self.conn: - self.conn.clear() - if self.conf: - self.conf.reset() - service.prepare_service = self.prepare_service - - -class CORSConfigFixture(fixture.GabbiFixture): - """Inject mock configuration for the CORS middleware.""" - - def start_fixture(self): - # Here we monkeypatch GroupAttr.__getattr__, necessary because the - # paste.ini method of initializing this middleware creates its own - # ConfigOpts instance, bypassing the regular config fixture. - - def _mock_getattr(instance, key): - if key != 'allowed_origin': - return self._original_call_method(instance, key) - return "http://valid.example.com" - - self._original_call_method = cfg.ConfigOpts.GroupAttr.__getattr__ - cfg.ConfigOpts.GroupAttr.__getattr__ = _mock_getattr - - def stop_fixture(self): - """Remove the monkeypatch.""" - cfg.ConfigOpts.GroupAttr.__getattr__ = self._original_call_method diff --git a/aodh/tests/functional/gabbi/gabbits/alarms.yaml b/aodh/tests/functional/gabbi/gabbits/alarms.yaml deleted file mode 100644 index 40ebe11a..00000000 --- a/aodh/tests/functional/gabbi/gabbits/alarms.yaml +++ /dev/null @@ -1,119 +0,0 @@ -# Requests to cover the basic endpoints for alarms. - -fixtures: - - ConfigFixture - -tests: -- name: list alarms none - desc: Lists alarms, none yet exist - GET: /v2/alarms - response_strings: - - "[]" - -- name: try to PUT an alarm - desc: what does PUT do - PUT: /v2/alarms - request_headers: - content-type: application/json - data: - name: added_alarm_defaults2 - type: threshold - threshold_rule: - meter_name: ameter - threshold: 300.0 - status: 405 - response_headers: - allow: GET, POST - -- name: createAlarm - desc: Creates an alarm. - POST: /v2/alarms - request_headers: - content-type: application/json - data: - ok_actions: null - name: added_alarm_defaults - type: threshold - threshold_rule: - meter_name: ameter - threshold: 300.0 - status: 201 - response_headers: - location: /$SCHEME://$NETLOC/v2/alarms/ - content-type: application/json - response_json_paths: - $.severity: low - $.threshold_rule.threshold: 300.0 - $.threshold_rule.comparison_operator: eq - -- name: showAlarm - desc: Shows information for a specified alarm. - GET: /v2/alarms/$RESPONSE['$.alarm_id'] - response_json_paths: - $.severity: low - $.alarm_id: $RESPONSE['$.alarm_id'] - $.threshold_rule.threshold: 300.0 - $.threshold_rule.comparison_operator: eq - response_headers: - content-type: application/json - -- name: updateAlarm - desc: Updates a specified alarm. 
- PUT: /v2/alarms/$RESPONSE['$.alarm_id'] - request_headers: - content-type: application/json - data: - name: added_alarm_defaults - type: threshold - severity: moderate - threshold_rule: - meter_name: ameter - threshold: 200.0 -# TODO(chdent): why do we have a response, why not status: 204? -# status: 204 - response_json_paths: - $.threshold_rule.threshold: 200.0 - $.severity: moderate - $.state: insufficient data - -- name: showAlarmHistory - desc: Assembles the history for a specified alarm. - GET: /v2/alarms/$RESPONSE['$.alarm_id']/history?q.field=type&q.op=eq&q.value=rule%20change - response_json_paths: - $[0].type: rule change - -- name: updateAlarmState - desc: Sets the state of a specified alarm. - PUT: /v2/alarms/$RESPONSE['$[0].alarm_id']/state - request_headers: - content-type: application/json - data: '"alarm"' -# TODO(chdent): really? Of what possible use is this? - response_json_paths: - $: alarm - -# Get a list of alarms so we can extract an id for the next test -- name: list alarms - desc: Lists alarms, only one - GET: /v2/alarms - response_json_paths: - $[0].name: added_alarm_defaults - -- name: showAlarmState - desc: Gets the state of a specified alarm. - GET: /v2/alarms/$RESPONSE['$[0].alarm_id']/state - response_headers: - content-type: application/json - response_json_paths: - $: alarm - -- name: deleteAlarm - desc: Deletes a specified alarm. - DELETE: /v2/alarms/$HISTORY['list alarms'].$RESPONSE['$[0].alarm_id'] - status: 204 - -- name: list alarms none end - desc: Lists alarms, none now exist - GET: /v2/alarms - response_strings: - - "[]" diff --git a/aodh/tests/functional/gabbi/gabbits/basic.yaml b/aodh/tests/functional/gabbi/gabbits/basic.yaml deleted file mode 100644 index d56a0de6..00000000 --- a/aodh/tests/functional/gabbi/gabbits/basic.yaml +++ /dev/null @@ -1,24 +0,0 @@ -# -# Some simple tests just to confirm that the system works. -# -fixtures: - - ConfigFixture - -tests: - -# Root gives us some information on where to go from here. -- name: quick root check - GET: / - response_headers: - content-type: application/json - response_strings: - - '"base": "application/json"' - response_json_paths: - versions.values.[0].status: stable - versions.values.[0].media-types.[0].base: application/json - -# NOTE(chdent): Ideally since / has a links ref to /v2, /v2 ought not 404! 
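The alarms.yaml requests above map one-to-one onto plain HTTP calls. A minimal sketch of the same lifecycle with python-requests, assuming a live endpoint with auth disabled (as ConfigFixture configures); the localhost:8042 address is an assumption for illustration, not part of the tests:

    import requests

    BASE = 'http://localhost:8042'
    alarm = {'name': 'added_alarm_defaults',
             'type': 'threshold',
             'threshold_rule': {'meter_name': 'ameter', 'threshold': 300.0}}

    resp = requests.post(BASE + '/v2/alarms', json=alarm)
    assert resp.status_code == 201
    alarm_id = resp.json()['alarm_id']

    # Server-side defaults are filled in, e.g. severity defaults to 'low'.
    shown = requests.get('%s/v2/alarms/%s' % (BASE, alarm_id)).json()
    assert shown['severity'] == 'low'

    requests.delete('%s/v2/alarms/%s' % (BASE, alarm_id))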
-- name: v2 visit - desc: this demonstrates a bug in the info in / - GET: $RESPONSE['versions.values.[0].links.[0].href'] - status: 404 diff --git a/aodh/tests/functional/gabbi/gabbits/capabilities.yaml b/aodh/tests/functional/gabbi/gabbits/capabilities.yaml deleted file mode 100644 index 66a7d7a7..00000000 --- a/aodh/tests/functional/gabbi/gabbits/capabilities.yaml +++ /dev/null @@ -1,12 +0,0 @@ -# -# Explore the capabilities API -# -fixtures: - - ConfigFixture - -tests: - -- name: get capabilities - GET: /v2/capabilities - response_json_paths: - $.alarm_storage.['storage:production_ready']: true diff --git a/aodh/tests/functional/gabbi/gabbits/healthcheck.yaml b/aodh/tests/functional/gabbi/gabbits/healthcheck.yaml deleted file mode 100644 index a2cf6fd1..00000000 --- a/aodh/tests/functional/gabbi/gabbits/healthcheck.yaml +++ /dev/null @@ -1,7 +0,0 @@ -fixtures: - - ConfigFixture - -tests: - - name: healthcheck - GET: /healthcheck - status: 200 diff --git a/aodh/tests/functional/gabbi/gabbits/middleware.yaml b/aodh/tests/functional/gabbi/gabbits/middleware.yaml deleted file mode 100644 index 3d220483..00000000 --- a/aodh/tests/functional/gabbi/gabbits/middleware.yaml +++ /dev/null @@ -1,44 +0,0 @@ -# -# Test the middlewares. Just CORS for now. -# - -fixtures: - - ConfigFixture - - CORSConfigFixture - -tests: - - - name: valid cors options - OPTIONS: / - status: 200 - request_headers: - origin: http://valid.example.com - access-control-request-method: GET - response_headers: - access-control-allow-origin: http://valid.example.com - - - name: invalid cors options - OPTIONS: / - status: 200 - request_headers: - origin: http://invalid.example.com - access-control-request-method: GET - response_forbidden_headers: - - access-control-allow-origin - - - name: valid cors get - GET: / - status: 200 - request_headers: - origin: http://valid.example.com - access-control-request-method: GET - response_headers: - access-control-allow-origin: http://valid.example.com - - - name: invalid cors get - GET: / - status: 200 - request_headers: - origin: http://invalid.example.com - response_forbidden_headers: - - access-control-allow-origin diff --git a/aodh/tests/functional/gabbi/test_gabbi.py b/aodh/tests/functional/gabbi/test_gabbi.py deleted file mode 100644 index 67d96188..00000000 --- a/aodh/tests/functional/gabbi/test_gabbi.py +++ /dev/null @@ -1,36 +0,0 @@ -# -# Copyright 2015 Red Hat. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""A test module to exercise the aodh API with gabbi - -For the sake of exploratory development. 
-""" - -import os - -from gabbi import driver - -from aodh.tests.functional.gabbi import fixtures as fixture_module - - -TESTS_DIR = 'gabbits' - - -def load_tests(loader, tests, pattern): - """Provide a TestSuite to the discovery process.""" - test_dir = os.path.join(os.path.dirname(__file__), TESTS_DIR) - return driver.build_tests(test_dir, loader, host=None, - intercept=fixture_module.setup_app, - fixture_module=fixture_module) diff --git a/aodh/tests/functional/hooks/post_test_hook.sh b/aodh/tests/functional/hooks/post_test_hook.sh deleted file mode 100755 index 4eba96f5..00000000 --- a/aodh/tests/functional/hooks/post_test_hook.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash -xe - -#FIXME(sileht): remove me when dsvm gate job is removed - - -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# This script is executed inside post_test_hook function in devstack gate. - -set -e -exit 0 diff --git a/aodh/tests/functional/storage/__init__.py b/aodh/tests/functional/storage/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/aodh/tests/functional/storage/sqlalchemy/__init__.py b/aodh/tests/functional/storage/sqlalchemy/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/aodh/tests/functional/storage/sqlalchemy/test_migrations.py b/aodh/tests/functional/storage/sqlalchemy/test_migrations.py deleted file mode 100644 index 9d126a15..00000000 --- a/aodh/tests/functional/storage/sqlalchemy/test_migrations.py +++ /dev/null @@ -1,47 +0,0 @@ -# -# Copyright 2015 Huawei Technologies Co., Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
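The load_tests function in the deleted test_gabbi.py is the standard unittest discovery hook: when a module defines it, discovery hands over collection and uses whatever suite the hook returns, which is how gabbi turns the YAML files into test cases. A stdlib-only sketch of the protocol, independent of gabbi:

    import unittest

    def load_tests(loader, standard_tests, pattern):
        suite = unittest.TestSuite()
        suite.addTests(standard_tests)  # keep the normally-collected tests
        # Tests can also be built dynamically, as gabbi's driver does from
        # its YAML inputs.
        suite.addTest(unittest.FunctionTestCase(lambda: None))
        return suite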
-import abc - -import mock -from oslo_db.sqlalchemy import test_migrations -import six - -from aodh.storage.sqlalchemy import models -from aodh.tests import base -from aodh.tests.functional import db as tests_db - - -class ABCSkip(base.SkipNotImplementedMeta, abc.ABCMeta): - pass - - -class ModelsMigrationsSync( - six.with_metaclass(ABCSkip, - tests_db.TestBase, - test_migrations.ModelsMigrationsSync)): - - def setUp(self): - super(ModelsMigrationsSync, self).setUp() - self.db = mock.Mock() - - @staticmethod - def get_metadata(): - return models.Base.metadata - - def get_engine(self): - return self.alarm_conn._engine_facade.get_engine() - - def db_sync(self, engine): - pass diff --git a/aodh/tests/functional/storage/test_get_connection.py b/aodh/tests/functional/storage/test_get_connection.py deleted file mode 100644 index 38312ce4..00000000 --- a/aodh/tests/functional/storage/test_get_connection.py +++ /dev/null @@ -1,87 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# Copyright 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Tests for aodh/storage/ -""" -import mock -from oslo_config import fixture as fixture_config -from oslotest import base - -from aodh import service -from aodh import storage -from aodh.storage import impl_log - -import six - - -class EngineTest(base.BaseTestCase): - def setUp(self): - super(EngineTest, self).setUp() - conf = service.prepare_service(argv=[], config_files=[]) - self.CONF = self.useFixture(fixture_config.Config(conf)).conf - - def test_get_connection(self): - self.CONF.set_override('connection', 'log://localhost', - group='database') - engine = storage.get_connection_from_config(self.CONF) - self.assertIsInstance(engine, impl_log.Connection) - - def test_get_connection_no_such_engine(self): - self.CONF.set_override('connection', 'no-such-engine://localhost', - group='database') - self.CONF.set_override('max_retries', 0, 'database') - try: - storage.get_connection_from_config(self.CONF) - except RuntimeError as err: - self.assertIn('no-such-engine', six.text_type(err)) - - -class ConnectionRetryTest(base.BaseTestCase): - def setUp(self): - super(ConnectionRetryTest, self).setUp() - conf = service.prepare_service(argv=[], config_files=[]) - self.CONF = self.useFixture(fixture_config.Config(conf)).conf - - def test_retries(self): - max_retries = 5 - with mock.patch.object( - storage.impl_log.Connection, '__init__') as log_init: - - class ConnectionError(Exception): - pass - - def x(a, b): - raise ConnectionError - - log_init.side_effect = x - self.CONF.set_override("connection", "log://", "database") - self.CONF.set_override("retry_interval", 0.00001, "database") - self.CONF.set_override("max_retries", max_retries, "database") - self.assertRaises(ConnectionError, - storage.get_connection_from_config, - self.CONF) - self.assertEqual(max_retries, log_init.call_count) - - -class ConnectionConfigTest(base.BaseTestCase): - def setUp(self): - super(ConnectionConfigTest, self).setUp() - conf = service.prepare_service(argv=[], 
config_files=[]) - self.CONF = self.useFixture(fixture_config.Config(conf)).conf - - def test_only_default_url(self): - self.CONF.set_override("connection", "log://", group="database") - conn = storage.get_connection_from_config(self.CONF) - self.assertIsInstance(conn, impl_log.Connection) diff --git a/aodh/tests/functional/storage/test_impl_log.py b/aodh/tests/functional/storage/test_impl_log.py deleted file mode 100644 index 9a6c71a7..00000000 --- a/aodh/tests/functional/storage/test_impl_log.py +++ /dev/null @@ -1,24 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from oslo_config import cfg -from oslotest import base - -from aodh.storage import impl_log - - -class ConnectionTest(base.BaseTestCase): - @staticmethod - def test_get_connection(): - impl_log.Connection(cfg.CONF, None) diff --git a/aodh/tests/functional/storage/test_impl_sqlalchemy.py b/aodh/tests/functional/storage/test_impl_sqlalchemy.py deleted file mode 100644 index a9c7f417..00000000 --- a/aodh/tests/functional/storage/test_impl_sqlalchemy.py +++ /dev/null @@ -1,35 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Tests for aodh/storage/impl_sqlalchemy.py - -.. note:: - In order to run the tests against real SQL server set the environment - variable aodh_TEST_SQL_URL to point to a SQL server before running - the tests. - -""" - -from aodh.storage import impl_sqlalchemy as impl_sqla_alarm -from aodh.tests import base as test_base - - -class CapabilitiesTest(test_base.BaseTestCase): - def test_alarm_capabilities(self): - expected_capabilities = { - 'alarms': {'query': {'simple': True, - 'complex': True}, - 'history': {'query': {'simple': True, - 'complex': True}}}, - } - - actual_capabilities = impl_sqla_alarm.Connection.get_capabilities() - self.assertEqual(expected_capabilities, actual_capabilities) diff --git a/aodh/tests/functional/storage/test_storage_scenarios.py b/aodh/tests/functional/storage/test_storage_scenarios.py deleted file mode 100644 index 2679f8be..00000000 --- a/aodh/tests/functional/storage/test_storage_scenarios.py +++ /dev/null @@ -1,502 +0,0 @@ -# -# Copyright 2013 Intel Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" Base classes for DB backend implementation test -""" - -import datetime - -import mock -from oslo_utils import timeutils - -from aodh import storage -from aodh.storage import models as alarm_models -from aodh.tests import constants -from aodh.tests.functional import db as tests_db - - -class DBTestBase(tests_db.TestBase): - @staticmethod - def create_side_effect(method, exception_type, test_exception): - def side_effect(*args, **kwargs): - if test_exception.pop(): - raise exception_type - else: - return method(*args, **kwargs) - return side_effect - - def setUp(self): - super(DBTestBase, self).setUp() - patcher = mock.patch.object(timeutils, 'utcnow') - self.addCleanup(patcher.stop) - self.mock_utcnow = patcher.start() - self.mock_utcnow.return_value = datetime.datetime(2015, 7, 2, 10, 39) - - -class AlarmTestBase(DBTestBase): - def add_some_alarms(self): - alarms = [alarm_models.Alarm(alarm_id='r3d', - enabled=True, - type='threshold', - name='red-alert', - description='my red-alert', - timestamp=datetime.datetime(2015, 7, - 2, 10, 25), - user_id='me', - project_id='and-da-boys', - state="insufficient data", - state_reason="insufficient data", - state_timestamp=constants.MIN_DATETIME, - ok_actions=[], - alarm_actions=['http://nowhere/alarms'], - insufficient_data_actions=[], - repeat_actions=False, - time_constraints=[dict(name='testcons', - start='0 11 * * *', - duration=300)], - rule=dict(comparison_operator='eq', - threshold=36, - statistic='count', - evaluation_periods=1, - period=60, - meter_name='test.one', - query=[{'field': 'key', - 'op': 'eq', - 'value': 'value', - 'type': 'string'}]), - ), - alarm_models.Alarm(alarm_id='0r4ng3', - enabled=True, - type='threshold', - name='orange-alert', - description='a orange', - timestamp=datetime.datetime(2015, 7, - 2, 10, 40), - user_id='me', - project_id='and-da-boys', - state="insufficient data", - state_reason="insufficient data", - state_timestamp=constants.MIN_DATETIME, - ok_actions=[], - alarm_actions=['http://nowhere/alarms'], - insufficient_data_actions=[], - repeat_actions=False, - time_constraints=[], - rule=dict(comparison_operator='gt', - threshold=75, - statistic='avg', - evaluation_periods=1, - period=60, - meter_name='test.forty', - query=[{'field': 'key2', - 'op': 'eq', - 'value': 'value2', - 'type': 'string'}]), - ), - alarm_models.Alarm(alarm_id='y3ll0w', - enabled=False, - type='threshold', - name='yellow-alert', - description='yellow', - timestamp=datetime.datetime(2015, 7, - 2, 10, 10), - user_id='me', - project_id='and-da-boys', - state="insufficient data", - state_reason="insufficient data", - state_timestamp=constants.MIN_DATETIME, - ok_actions=[], - alarm_actions=['http://nowhere/alarms'], - insufficient_data_actions=[], - repeat_actions=False, - time_constraints=[], - rule=dict(comparison_operator='lt', - threshold=10, - statistic='min', - evaluation_periods=1, - period=60, - meter_name='test.five', - query=[{'field': 'key2', - 'op': 'eq', - 'value': 'value2', - 'type': 'string'}, - {'field': - 'user_metadata.key3', - 'op': 'eq', - 'value': 'value3', - 'type': 'string'}]), - )] - - for a in alarms: - 
self.alarm_conn.create_alarm(a) - - -class AlarmTest(AlarmTestBase): - - def test_empty(self): - alarms = list(self.alarm_conn.get_alarms()) - self.assertEqual([], alarms) - - def test_list(self): - self.add_some_alarms() - alarms = list(self.alarm_conn.get_alarms()) - self.assertEqual(3, len(alarms)) - - def test_list_ordered_by_timestamp(self): - self.add_some_alarms() - alarms = list(self.alarm_conn.get_alarms()) - self.assertEqual(len(alarms), 3) - alarm_l = [a.timestamp for a in alarms] - alarm_l_ordered = [datetime.datetime(2015, 7, 2, 10, 40), - datetime.datetime(2015, 7, 2, 10, 25), - datetime.datetime(2015, 7, 2, 10, 10)] - self.assertEqual(alarm_l_ordered, alarm_l) - - def test_list_enabled(self): - self.add_some_alarms() - alarms = list(self.alarm_conn.get_alarms(enabled=True)) - self.assertEqual(2, len(alarms)) - - def test_list_disabled(self): - self.add_some_alarms() - alarms = list(self.alarm_conn.get_alarms(enabled=False)) - self.assertEqual(1, len(alarms)) - - def test_list_by_type(self): - self.add_some_alarms() - alarms = list(self.alarm_conn.get_alarms(alarm_type='threshold')) - self.assertEqual(3, len(alarms)) - - def test_list_excluded_by_name(self): - self.add_some_alarms() - exclude = {'name': 'yellow-alert'} - alarms = list(self.alarm_conn.get_alarms(exclude=exclude)) - self.assertEqual(2, len(alarms)) - alarm_names = sorted([a.name for a in alarms]) - self.assertEqual(['orange-alert', 'red-alert'], alarm_names) - - def test_add(self): - self.add_some_alarms() - alarms = list(self.alarm_conn.get_alarms()) - self.assertEqual(3, len(alarms)) - - meter_names = sorted([a.rule['meter_name'] for a in alarms]) - self.assertEqual(['test.five', 'test.forty', 'test.one'], meter_names) - - def test_update(self): - self.add_some_alarms() - orange = list(self.alarm_conn.get_alarms(name='orange-alert'))[0] - orange.enabled = False - orange.state = alarm_models.Alarm.ALARM_INSUFFICIENT_DATA - query = [{'field': 'metadata.group', - 'op': 'eq', - 'value': 'test.updated', - 'type': 'string'}] - orange.rule['query'] = query - orange.rule['meter_name'] = 'new_meter_name' - updated = self.alarm_conn.update_alarm(orange) - self.assertFalse(updated.enabled) - self.assertEqual(alarm_models.Alarm.ALARM_INSUFFICIENT_DATA, - updated.state) - self.assertEqual(query, updated.rule['query']) - self.assertEqual('new_meter_name', updated.rule['meter_name']) - - def test_update_llu(self): - llu = alarm_models.Alarm(alarm_id='llu', - enabled=True, - type='threshold', - name='llu', - description='llu', - timestamp=constants.MIN_DATETIME, - user_id='bla', - project_id='ffo', - state="insufficient data", - state_reason="insufficient data", - state_timestamp=constants.MIN_DATETIME, - ok_actions=[], - alarm_actions=[], - insufficient_data_actions=[], - repeat_actions=False, - time_constraints=[], - rule=dict(comparison_operator='lt', - threshold=34, - statistic='max', - evaluation_periods=1, - period=60, - meter_name='llt', - query=[]) - ) - updated = self.alarm_conn.create_alarm(llu) - updated.state = alarm_models.Alarm.ALARM_OK - updated.description = ':)' - self.alarm_conn.update_alarm(updated) - - all = list(self.alarm_conn.get_alarms()) - self.assertEqual(1, len(all)) - - def test_update_deleted_alarm_failed(self): - self.add_some_alarms() - alarm1 = list(self.alarm_conn.get_alarms(name='orange-alert'))[0] - self.alarm_conn.delete_alarm(alarm1.alarm_id) - survivors = list(self.alarm_conn.get_alarms()) - self.assertEqual(2, len(survivors)) - alarm1.state = alarm_models.Alarm.ALARM_ALARM - 
self.assertRaises(storage.AlarmNotFound, - self.alarm_conn.update_alarm, alarm1) - survivors = list(self.alarm_conn.get_alarms()) - self.assertEqual(2, len(survivors)) - - def test_delete(self): - self.add_some_alarms() - victim = list(self.alarm_conn.get_alarms(name='orange-alert'))[0] - self.alarm_conn.delete_alarm(victim.alarm_id) - survivors = list(self.alarm_conn.get_alarms()) - self.assertEqual(2, len(survivors)) - for s in survivors: - self.assertNotEqual(victim.name, s.name) - - -class AlarmHistoryTest(AlarmTestBase): - - def setUp(self): - super(AlarmTestBase, self).setUp() - self.add_some_alarms() - self.prepare_alarm_history() - - def prepare_alarm_history(self): - alarms = list(self.alarm_conn.get_alarms()) - for alarm in alarms: - i = alarms.index(alarm) - alarm_change = { - "event_id": "3e11800c-a3ca-4991-b34b-d97efb6047d%s" % i, - "alarm_id": alarm.alarm_id, - "type": alarm_models.AlarmChange.CREATION, - "detail": "detail %s" % alarm.name, - "user_id": alarm.user_id, - "project_id": alarm.project_id, - "on_behalf_of": alarm.project_id, - "timestamp": datetime.datetime(2014, 4, 7, 7, 30 + i) - } - self.alarm_conn.record_alarm_change(alarm_change=alarm_change) - - def _clear_alarm_history(self, utcnow, ttl, count): - self.mock_utcnow.return_value = utcnow - self.alarm_conn.clear_expired_alarm_history_data(ttl) - history = list(self.alarm_conn.query_alarm_history()) - self.assertEqual(count, len(history)) - - def test_clear_alarm_history_no_data_to_remove(self): - utcnow = datetime.datetime(2013, 4, 7, 7, 30) - self._clear_alarm_history(utcnow, 1, 3) - - def test_clear_some_alarm_history(self): - utcnow = datetime.datetime(2014, 4, 7, 7, 35) - self._clear_alarm_history(utcnow, 3 * 60, 1) - - def test_clear_all_alarm_history(self): - utcnow = datetime.datetime(2014, 4, 7, 7, 45) - self._clear_alarm_history(utcnow, 3 * 60, 0) - - def test_delete_history_when_delete_alarm(self): - alarms = list(self.alarm_conn.get_alarms()) - self.assertEqual(3, len(alarms)) - history = list(self.alarm_conn.query_alarm_history()) - self.assertEqual(3, len(history)) - for alarm in alarms: - self.alarm_conn.delete_alarm(alarm.alarm_id) - self.assertEqual(3, len(alarms)) - history = list(self.alarm_conn.query_alarm_history()) - self.assertEqual(0, len(history)) - - -class ComplexAlarmQueryTest(AlarmTestBase): - - def test_no_filter(self): - self.add_some_alarms() - result = list(self.alarm_conn.query_alarms()) - self.assertEqual(3, len(result)) - - def test_no_filter_with_limit(self): - self.add_some_alarms() - result = list(self.alarm_conn.query_alarms(limit=2)) - self.assertEqual(2, len(result)) - - def test_filter(self): - self.add_some_alarms() - filter_expr = {"and": - [{"or": - [{"=": {"name": "yellow-alert"}}, - {"=": {"name": "red-alert"}}]}, - {"=": {"enabled": True}}]} - - result = list(self.alarm_conn.query_alarms(filter_expr=filter_expr)) - - self.assertEqual(1, len(result)) - for a in result: - self.assertIn(a.name, set(["yellow-alert", "red-alert"])) - self.assertTrue(a.enabled) - - def test_filter_with_regexp(self): - self.add_some_alarms() - filter_expr = {"and": - [{"or": [{"=": {"name": "yellow-alert"}}, - {"=": {"name": "red-alert"}}]}, - {"=~": {"description": "yel.*"}}]} - - result = list(self.alarm_conn.query_alarms(filter_expr=filter_expr)) - - self.assertEqual(1, len(result)) - for a in result: - self.assertEqual("yellow", a.description) - - def test_filter_for_alarm_id(self): - self.add_some_alarms() - filter_expr = {"=": {"alarm_id": "0r4ng3"}} - - result = 
list(self.alarm_conn.query_alarms(filter_expr=filter_expr)) - - self.assertEqual(1, len(result)) - for a in result: - self.assertEqual("0r4ng3", a.alarm_id) - - def test_filter_and_orderby(self): - self.add_some_alarms() - result = list(self.alarm_conn.query_alarms(filter_expr=( - {"=": {"enabled": True}}), - orderby=[{"name": "asc"}])) - self.assertEqual(2, len(result)) - self.assertEqual(["orange-alert", "red-alert"], - [a.name for a in result]) - for a in result: - self.assertTrue(a.enabled) - - -class ComplexAlarmHistoryQueryTest(AlarmTestBase): - def setUp(self): - super(DBTestBase, self).setUp() - self.filter_expr = {"and": - [{"or": - [{"=": {"type": "rule change"}}, - {"=": {"type": "state transition"}}]}, - {"=": {"alarm_id": "0r4ng3"}}]} - self.add_some_alarms() - self.prepare_alarm_history() - - def prepare_alarm_history(self): - alarms = list(self.alarm_conn.get_alarms()) - name_index = { - 'red-alert': 0, - 'orange-alert': 1, - 'yellow-alert': 2 - } - - for alarm in alarms: - i = name_index[alarm.name] - alarm_change = dict(event_id=( - "16fd2706-8baf-433b-82eb-8c7fada847c%s" % i), - alarm_id=alarm.alarm_id, - type=alarm_models.AlarmChange.CREATION, - detail="detail %s" % alarm.name, - user_id=alarm.user_id, - project_id=alarm.project_id, - on_behalf_of=alarm.project_id, - timestamp=datetime.datetime(2012, 9, 24, - 7 + i, - 30 + i)) - self.alarm_conn.record_alarm_change(alarm_change=alarm_change) - - alarm_change2 = dict(event_id=( - "16fd2706-8baf-433b-82eb-8c7fada847d%s" % i), - alarm_id=alarm.alarm_id, - type=alarm_models.AlarmChange.RULE_CHANGE, - detail="detail %s" % i, - user_id=alarm.user_id, - project_id=alarm.project_id, - on_behalf_of=alarm.project_id, - timestamp=datetime.datetime(2012, 9, 25, - 10 + i, - 30 + i)) - self.alarm_conn.record_alarm_change(alarm_change=alarm_change2) - - alarm_change3 = dict( - event_id="16fd2706-8baf-433b-82eb-8c7fada847e%s" % i, - alarm_id=alarm.alarm_id, - type=alarm_models.AlarmChange.STATE_TRANSITION, - detail="detail %s" % (i + 1), - user_id=alarm.user_id, - project_id=alarm.project_id, - on_behalf_of=alarm.project_id, - timestamp=datetime.datetime(2012, 9, 26, 10 + i, 30 + i) - ) - - if alarm.name == "red-alert": - alarm_change3['on_behalf_of'] = 'and-da-girls' - - self.alarm_conn.record_alarm_change(alarm_change=alarm_change3) - - def test_alarm_history_with_no_filter(self): - history = list(self.alarm_conn.query_alarm_history()) - self.assertEqual(9, len(history)) - - def test_alarm_history_with_no_filter_and_limit(self): - history = list(self.alarm_conn.query_alarm_history(limit=3)) - self.assertEqual(3, len(history)) - - def test_alarm_history_with_filter(self): - history = list( - self.alarm_conn.query_alarm_history(filter_expr=self.filter_expr)) - self.assertEqual(2, len(history)) - - def test_alarm_history_with_regexp(self): - filter_expr = {"and": - [{"=~": {"type": "(rule)|(state)"}}, - {"=": {"alarm_id": "0r4ng3"}}]} - history = list( - self.alarm_conn.query_alarm_history(filter_expr=filter_expr)) - self.assertEqual(2, len(history)) - - def test_alarm_history_with_filter_and_orderby(self): - history = list( - self.alarm_conn.query_alarm_history(filter_expr=self.filter_expr, - orderby=[{"timestamp": - "asc"}])) - self.assertEqual([alarm_models.AlarmChange.RULE_CHANGE, - alarm_models.AlarmChange.STATE_TRANSITION], - [h.type for h in history]) - - def test_alarm_history_with_filter_and_orderby_and_limit(self): - history = list( - self.alarm_conn.query_alarm_history(filter_expr=self.filter_expr, - orderby=[{"timestamp": - 
"asc"}], - limit=1)) - self.assertEqual(alarm_models.AlarmChange.RULE_CHANGE, history[0].type) - - def test_alarm_history_with_on_behalf_of_filter(self): - filter_expr = {"=": {"on_behalf_of": "and-da-girls"}} - history = list(self.alarm_conn.query_alarm_history( - filter_expr=filter_expr)) - self.assertEqual(1, len(history)) - self.assertEqual("16fd2706-8baf-433b-82eb-8c7fada847e0", - history[0].event_id) - - def test_alarm_history_with_alarm_id_as_filter(self): - filter_expr = {"=": {"alarm_id": "r3d"}} - history = list(self.alarm_conn.query_alarm_history( - filter_expr=filter_expr, orderby=[{"timestamp": "asc"}])) - self.assertEqual(3, len(history)) - self.assertEqual([alarm_models.AlarmChange.CREATION, - alarm_models.AlarmChange.RULE_CHANGE, - alarm_models.AlarmChange.STATE_TRANSITION], - [h.type for h in history]) diff --git a/aodh/tests/functional_live/__init__.py b/aodh/tests/functional_live/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/aodh/tests/functional_live/gabbi/__init__.py b/aodh/tests/functional_live/gabbi/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/aodh/tests/functional_live/gabbi/gabbits-live/alarms.yaml b/aodh/tests/functional_live/gabbi/gabbits-live/alarms.yaml deleted file mode 100644 index 2458b1fe..00000000 --- a/aodh/tests/functional_live/gabbi/gabbits-live/alarms.yaml +++ /dev/null @@ -1,118 +0,0 @@ -defaults: - request_headers: - x-auth-token: $ENVIRON['AODH_SERVICE_TOKEN'] - x-roles: $ENVIRON['AODH_SERVICE_ROLES'] - -tests: - - name: list alarms none - desc: Lists alarms, none yet exist - GET: /v2/alarms - response_strings: - - "[]" - - - name: try to PUT an alarm - desc: what does PUT do - PUT: /v2/alarms - request_headers: - content-type: application/json - data: - name: added_alarm_defaults2 - type: threshold - threshold_rule: - meter_name: ameter - threshold: 300.0 - status: 405 - response_headers: - allow: GET, POST - - - name: createAlarm - desc: Creates an alarm. - POST: /v2/alarms - request_headers: - content-type: application/json - data: - name: added_alarm_defaults - type: threshold - threshold_rule: - meter_name: ameter - threshold: 300.0 - status: 201 - response_headers: - location: /$SCHEME://$NETLOC/v2/alarms/ - content-type: application/json - response_json_paths: - $.severity: low - $.threshold_rule.threshold: 300.0 - $.threshold_rule.comparison_operator: eq - - - name: showAlarm - desc: Shows information for a specified alarm. - GET: /v2/alarms/$RESPONSE['$.alarm_id'] - response_json_paths: - $.severity: low - $.alarm_id: $RESPONSE['$.alarm_id'] - $.threshold_rule.threshold: 300.0 - $.threshold_rule.comparison_operator: eq - response_headers: - content-type: application/json - - - name: updateAlarm - desc: Updates a specified alarm. - PUT: /v2/alarms/$RESPONSE['$.alarm_id'] - request_headers: - content-type: application/json - data: - name: added_alarm_defaults - type: threshold - severity: moderate - threshold_rule: - meter_name: ameter - threshold: 200.0 - # TODO(chdent): why do we have a response, why not status: 204? - # status: 204 - response_json_paths: - $.threshold_rule.threshold: 200.0 - $.severity: moderate - $.state: insufficient data - - - name: showAlarmHistory - desc: Assembles the history for a specified alarm. - GET: /v2/alarms/$RESPONSE['$.alarm_id']/history?q.field=type&q.op=eq&q.value=rule%20change - response_json_paths: - $[0].type: rule change - - - name: updateAlarmState - desc: Sets the state of a specified alarm. 
- PUT: /v2/alarms/$RESPONSE['$[0].alarm_id']/state - request_headers: - content-type: application/json - data: '"alarm"' - # TODO(chdent): really? Of what possible use is this? - response_json_paths: - $: alarm - - # Get a list of alarms so we can extract an id for the next test - - name: list alarms - desc: Lists alarms, only one - GET: /v2/alarms - response_json_paths: - $[0].name: added_alarm_defaults - - - name: showAlarmState - desc: Gets the state of a specified alarm. - GET: /v2/alarms/$RESPONSE['$[0].alarm_id']/state - response_headers: - content-type: application/json - response_json_paths: - $: alarm - - - name: deleteAlarm - desc: Deletes a specified alarm. - DELETE: /v2/alarms/$HISTORY['list alarms'].$RESPONSE['$[0].alarm_id'] - status: 204 - - - name: list alarms none end - desc: Lists alarms, none now exist - GET: /v2/alarms - response_strings: - - "[]" diff --git a/aodh/tests/functional_live/gabbi/test_gabbi_live.py b/aodh/tests/functional_live/gabbi/test_gabbi_live.py deleted file mode 100644 index 7f48c739..00000000 --- a/aodh/tests/functional_live/gabbi/test_gabbi_live.py +++ /dev/null @@ -1,53 +0,0 @@ -# -# Copyright 2015 Red Hat. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -A test module to exercise the aodh API with gabbi. - -This is designed to run against a real running web server (started by -devstack). -""" - -import os - -from gabbi import driver -import six.moves.urllib.parse as urlparse - - -TESTS_DIR = 'gabbits-live' - - -def load_tests(loader, tests, pattern): - """Provide a TestSuite to the discovery process.""" - aodh_url = os.getenv('AODH_URL') - if aodh_url: - parsed_url = urlparse.urlsplit(aodh_url) - prefix = parsed_url.path.rstrip('/') # turn it into a prefix - - # NOTE(chdent): gabbi requires a port be passed or it will - # default to 8001, so we must dance a little dance to get - # the right ports. Probably gabbi needs to change.
- # https://github.com/cdent/gabbi/issues/50 - port = 443 if parsed_url.scheme == 'https' else 80 - if parsed_url.port: - port = parsed_url.port - - test_dir = os.path.join(os.path.dirname(__file__), TESTS_DIR) - return driver.build_tests(test_dir, loader, - host=parsed_url.hostname, - port=port, - prefix=prefix) - elif os.getenv('GABBI_LIVE_FAIL_IF_NO_TEST'): - raise RuntimeError('AODH_URL is not set') diff --git a/aodh/tests/open-policy.json b/aodh/tests/open-policy.json deleted file mode 100644 index 8f0602af..00000000 --- a/aodh/tests/open-policy.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "context_is_admin": "role:admin", - "segregation": "rule:context_is_admin", - "default": "" -} diff --git a/aodh/tests/tempest/__init__.py b/aodh/tests/tempest/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/aodh/tests/tempest/api/__init__.py b/aodh/tests/tempest/api/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/aodh/tests/tempest/api/base.py b/aodh/tests/tempest/api/base.py deleted file mode 100644 index 33afd9b0..00000000 --- a/aodh/tests/tempest/api/base.py +++ /dev/null @@ -1,64 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from tempest import config -from tempest.lib.common.utils import data_utils -from tempest.lib import exceptions as lib_exc -import tempest.test - -from aodh.tests.tempest.service import client - -CONF = config.CONF - - -class BaseAlarmingTest(tempest.test.BaseTestCase): - """Base test case class for all Alarming API tests.""" - - credentials = ['primary'] - client_manager = client.Manager - - @classmethod - def skip_checks(cls): - super(BaseAlarmingTest, cls).skip_checks() - if not CONF.service_available.aodh_plugin: - raise cls.skipException("Aodh support is required") - - @classmethod - def setup_clients(cls): - super(BaseAlarmingTest, cls).setup_clients() - cls.alarming_client = cls.os_primary.alarming_client - - @classmethod - def resource_setup(cls): - super(BaseAlarmingTest, cls).resource_setup() - cls.alarm_ids = [] - - @classmethod - def create_alarm(cls, **kwargs): - body = cls.alarming_client.create_alarm( - name=data_utils.rand_name('telemetry_alarm'), - type='threshold', **kwargs) - cls.alarm_ids.append(body['alarm_id']) - return body - - @staticmethod - def cleanup_resources(method, list_of_ids): - for resource_id in list_of_ids: - try: - method(resource_id) - except lib_exc.NotFound: - pass - - @classmethod - def resource_cleanup(cls): - cls.cleanup_resources(cls.alarming_client.delete_alarm, cls.alarm_ids) - super(BaseAlarmingTest, cls).resource_cleanup() diff --git a/aodh/tests/tempest/api/test_alarming_api.py b/aodh/tests/tempest/api/test_alarming_api.py deleted file mode 100644 index 0132ce9c..00000000 --- a/aodh/tests/tempest/api/test_alarming_api.py +++ /dev/null @@ -1,94 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from tempest.lib.common.utils import data_utils -from tempest.lib import decorators -from tempest.lib import exceptions as lib_exc - -from aodh.tests.tempest.api import base - - -class TelemetryAlarmingAPITest(base.BaseAlarmingTest): - - @classmethod - def resource_setup(cls): - super(TelemetryAlarmingAPITest, cls).resource_setup() - cls.rule = {'meter_name': 'cpu_util', - 'comparison_operator': 'gt', - 'threshold': 80.0, - 'period': 70} - for i in range(2): - cls.create_alarm(threshold_rule=cls.rule) - - @decorators.idempotent_id('1c918e06-210b-41eb-bd45-14676dd77cd7') - def test_alarm_list(self): - # List alarms - alarm_list = self.alarming_client.list_alarms() - - # Verify created alarm in the list - fetched_ids = [a['alarm_id'] for a in alarm_list] - missing_alarms = [a for a in self.alarm_ids if a not in fetched_ids] - self.assertEqual(0, len(missing_alarms), - "Failed to find the following created alarm(s)" - " in a fetched list: %s" % - ', '.join(str(a) for a in missing_alarms)) - - @decorators.idempotent_id('1297b095-39c1-4e74-8a1f-4ae998cedd68') - def test_create_update_get_delete_alarm(self): - # Create an alarm - alarm_name = data_utils.rand_name('telemetry_alarm') - body = self.alarming_client.create_alarm( - name=alarm_name, type='threshold', threshold_rule=self.rule) - self.assertEqual(alarm_name, body['name']) - alarm_id = body['alarm_id'] - self.assertDictContainsSubset(self.rule, body['threshold_rule']) - # Update alarm with new rule and new name - new_rule = {'meter_name': 'cpu', - 'comparison_operator': 'eq', - 'threshold': 70.0, - 'period': 60} - alarm_name_updated = data_utils.rand_name('telemetry-alarm-update') - body = self.alarming_client.update_alarm( - alarm_id, - threshold_rule=new_rule, - name=alarm_name_updated, - type='threshold') - self.assertEqual(alarm_name_updated, body['name']) - self.assertDictContainsSubset(new_rule, body['threshold_rule']) - # Get and verify details of an alarm after update - body = self.alarming_client.show_alarm(alarm_id) - self.assertEqual(alarm_name_updated, body['name']) - self.assertDictContainsSubset(new_rule, body['threshold_rule']) - # Get history for the alarm and verify the same - body = self.alarming_client.show_alarm_history(alarm_id) - self.assertEqual("rule change", body[0]['type']) - self.assertIn(alarm_name_updated, body[0]['detail']) - self.assertEqual("creation", body[1]['type']) - self.assertIn(alarm_name, body[1]['detail']) - # Delete alarm and verify if deleted - self.alarming_client.delete_alarm(alarm_id) - self.assertRaises(lib_exc.NotFound, - self.alarming_client.show_alarm, alarm_id) - - @decorators.idempotent_id('aca49486-70bb-4016-87e0-f6131374f742') - def test_set_get_alarm_state(self): - alarm_states = ['ok', 'alarm', 'insufficient data'] - alarm = self.create_alarm(threshold_rule=self.rule) - # Set alarm state and verify - new_state =\ - [elem for elem in alarm_states if elem != alarm['state']][0] - state = self.alarming_client.alarm_set_state(alarm['alarm_id'], - new_state) - self.assertEqual(new_state, state.data) - # Get alarm state and verify - state = 
self.alarming_client.show_alarm_state(alarm['alarm_id']) - self.assertEqual(new_state, state.data) diff --git a/aodh/tests/tempest/api/test_alarming_api_negative.py b/aodh/tests/tempest/api/test_alarming_api_negative.py deleted file mode 100644 index aa6c082d..00000000 --- a/aodh/tests/tempest/api/test_alarming_api_negative.py +++ /dev/null @@ -1,71 +0,0 @@ -# Copyright 2015 GlobalLogic. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_utils import uuidutils -from tempest.lib.common.utils import data_utils -from tempest.lib import decorators -from tempest.lib import exceptions as lib_exc - -from aodh.tests.tempest.api import base - - -class TelemetryAlarmingNegativeTest(base.BaseAlarmingTest): - """Negative tests for show_alarm, update_alarm, show_alarm_history tests - - ** show non-existent alarm - ** show the deleted alarm - ** delete deleted alarm - ** update deleted alarm - """ - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('668743d5-08ad-4480-b2b8-15da34f81e7e') - def test_get_non_existent_alarm(self): - # get the non-existent alarm - non_existent_id = uuidutils.generate_uuid() - self.assertRaises(lib_exc.NotFound, self.alarming_client.show_alarm, - non_existent_id) - - @decorators.attr(type=['negative']) - @decorators.idempotent_id('ef45000d-0a72-4781-866d-4cb7bf2582ae') - def test_get_update_show_history_delete_deleted_alarm(self): - # get, update and delete the deleted alarm - alarm_name = data_utils.rand_name('telemetry_alarm') - rule = {'meter_name': 'cpu', - 'comparison_operator': 'eq', - 'threshold': 100.0, - 'period': 90} - body = self.alarming_client.create_alarm( - name=alarm_name, - type='threshold', - threshold_rule=rule) - alarm_id = body['alarm_id'] - self.alarming_client.delete_alarm(alarm_id) - # get the deleted alarm - self.assertRaises(lib_exc.NotFound, self.alarming_client.show_alarm, - alarm_id) - - # update the deleted alarm - updated_alarm_name = data_utils.rand_name('telemetry_alarm_updated') - updated_rule = {'meter_name': 'cpu_new', - 'comparison_operator': 'eq', - 'threshold': 70, - 'period': 50} - self.assertRaises(lib_exc.NotFound, self.alarming_client.update_alarm, - alarm_id, threshold_rule=updated_rule, - name=updated_alarm_name, - type='threshold') - # delete the deleted alarm - self.assertRaises(lib_exc.NotFound, self.alarming_client.delete_alarm, - alarm_id) diff --git a/aodh/tests/tempest/config.py b/aodh/tests/tempest/config.py deleted file mode 100644 index cf742b7d..00000000 --- a/aodh/tests/tempest/config.py +++ /dev/null @@ -1,36 +0,0 @@ -# -# Copyright 2012 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg - -service_option = [ - cfg.BoolOpt("aodh_plugin", - default=True, - help="Whether or not Aodh is expected to be available"), -] - -alarming_group = cfg.OptGroup(name='alarming_plugin', - title='Alarming Service Options') - -AlarmingGroup = [ - cfg.StrOpt('catalog_type', - default='alarming', - help="Catalog type of the Alarming service."), - cfg.StrOpt('endpoint_type', - default='publicURL', - choices=['public', 'admin', 'internal', - 'publicURL', 'adminURL', 'internalURL'], - help="The endpoint type to use for the alarming service."), -] diff --git a/aodh/tests/tempest/plugin.py b/aodh/tests/tempest/plugin.py deleted file mode 100644 index 70b98c5f..00000000 --- a/aodh/tests/tempest/plugin.py +++ /dev/null @@ -1,47 +0,0 @@ -# -# Copyright 2015 NEC Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os - -from tempest import config -from tempest.test_discover import plugins - -import aodh -from aodh.tests.tempest import config as tempest_config - - -class AodhTempestPlugin(plugins.TempestPlugin): - - def load_tests(self): - base_path = os.path.split(os.path.dirname( - os.path.abspath(aodh.__file__)))[0] - test_dir = "aodh/tests/tempest" - full_test_dir = os.path.join(base_path, test_dir) - return full_test_dir, base_path - - def register_opts(self, conf): - config.register_opt_group(conf, - config.service_available_group, - tempest_config.service_option) - config.register_opt_group(conf, - tempest_config.alarming_group, - tempest_config.AlarmingGroup) - - def get_opt_lists(self): - return [ - (tempest_config.alarming_group.name, - tempest_config.AlarmingGroup), - ('service_available', tempest_config.service_option) - ] diff --git a/aodh/tests/tempest/service/__init__.py b/aodh/tests/tempest/service/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/aodh/tests/tempest/service/client.py b/aodh/tests/tempest/service/client.py deleted file mode 100644 index 236d0efb..00000000 --- a/aodh/tests/tempest/service/client.py +++ /dev/null @@ -1,127 +0,0 @@ -# Copyright 2014 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_serialization import jsonutils as json -from six.moves.urllib import parse as urllib -from tempest import config -from tempest.lib.common import rest_client -from tempest import manager - - -CONF = config.CONF - - -class AlarmingClient(rest_client.RestClient): - - version = '2' - uri_prefix = "v2" - - def deserialize(self, body): - return json.loads(body.replace("\n", "")) - - def serialize(self, body): - return json.dumps(body) - - def list_alarms(self, query=None): - uri = '%s/alarms' % self.uri_prefix - uri_dict = {} - if query: - uri_dict = {'q.field': query[0], - 'q.op': query[1], - 'q.value': query[2]} - if uri_dict: - uri += "?%s" % urllib.urlencode(uri_dict) - resp, body = self.get(uri) - self.expected_success(200, resp.status) - body = self.deserialize(body) - return rest_client.ResponseBodyList(resp, body) - - def show_alarm(self, alarm_id): - uri = '%s/alarms/%s' % (self.uri_prefix, alarm_id) - resp, body = self.get(uri) - self.expected_success(200, resp.status) - body = self.deserialize(body) - return rest_client.ResponseBody(resp, body) - - def show_alarm_history(self, alarm_id): - uri = "%s/alarms/%s/history" % (self.uri_prefix, alarm_id) - resp, body = self.get(uri) - self.expected_success(200, resp.status) - body = self.deserialize(body) - return rest_client.ResponseBodyList(resp, body) - - def delete_alarm(self, alarm_id): - uri = "%s/alarms/%s" % (self.uri_prefix, alarm_id) - resp, body = self.delete(uri) - self.expected_success(204, resp.status) - if body: - body = self.deserialize(body) - return rest_client.ResponseBody(resp, body) - - def create_alarm(self, **kwargs): - uri = "%s/alarms" % self.uri_prefix - body = self.serialize(kwargs) - resp, body = self.post(uri, body) - self.expected_success(201, resp.status) - body = self.deserialize(body) - return rest_client.ResponseBody(resp, body) - - def update_alarm(self, alarm_id, **kwargs): - uri = "%s/alarms/%s" % (self.uri_prefix, alarm_id) - body = self.serialize(kwargs) - resp, body = self.put(uri, body) - self.expected_success(200, resp.status) - body = self.deserialize(body) - return rest_client.ResponseBody(resp, body) - - def show_alarm_state(self, alarm_id): - uri = "%s/alarms/%s/state" % (self.uri_prefix, alarm_id) - resp, body = self.get(uri) - self.expected_success(200, resp.status) - body = self.deserialize(body) - return rest_client.ResponseBodyData(resp, body) - - def alarm_set_state(self, alarm_id, state): - uri = "%s/alarms/%s/state" % (self.uri_prefix, alarm_id) - body = self.serialize(state) - resp, body = self.put(uri, body) - self.expected_success(200, resp.status) - body = self.deserialize(body) - return rest_client.ResponseBodyData(resp, body) - - -class Manager(manager.Manager): - - default_params = { - 'disable_ssl_certificate_validation': - CONF.identity.disable_ssl_certificate_validation, - 'ca_certs': CONF.identity.ca_certificates_file, - 'trace_requests': CONF.debug.trace_requests - } - - alarming_params = { - 'service': CONF.alarming_plugin.catalog_type, - 'region': CONF.identity.region, - 'endpoint_type': CONF.alarming_plugin.endpoint_type, - } - alarming_params.update(default_params) - - def __init__(self, credentials=None, service=None): - super(Manager, self).__init__(credentials) - self.set_alarming_client() - - def set_alarming_client(self): - self.alarming_client = AlarmingClient(self.auth_provider, - **self.alarming_params) diff --git a/aodh/tests/unit/__init__.py 
b/aodh/tests/unit/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/aodh/tests/unit/evaluator/__init__.py b/aodh/tests/unit/evaluator/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/aodh/tests/unit/evaluator/base.py b/aodh/tests/unit/evaluator/base.py deleted file mode 100644 index 047be2b8..00000000 --- a/aodh/tests/unit/evaluator/base.py +++ /dev/null @@ -1,56 +0,0 @@ -# -# Copyright 2013 eNovance -# Copyright 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import fixtures -import mock -from oslo_config import fixture -from oslotest import base - -from aodh import service - - -class TestEvaluatorBase(base.BaseTestCase): - def setUp(self): - super(TestEvaluatorBase, self).setUp() - conf = service.prepare_service(argv=[], config_files=[]) - self.conf = self.useFixture(fixture.Config(conf)).conf - self.api_client = mock.Mock() - self.useFixture( - fixtures.MockPatch('ceilometerclient.client.get_client', - return_value=self.api_client)) - self.evaluator = self.EVALUATOR(self.conf) - self.notifier = mock.MagicMock() - self.evaluator.notifier = self.notifier - self.storage_conn = mock.MagicMock() - self.evaluator.storage_conn = self.storage_conn - self.evaluator._ks_client = mock.Mock(user_id='fake_user_id', - project_id='fake_project_id', - auth_token='fake_token') - self.prepare_alarms() - - def prepare_alarms(self): - self.alarms = [] - - def _evaluate_all_alarms(self): - for alarm in self.alarms: - self.evaluator.evaluate(alarm) - - def _set_all_alarms(self, state): - for alarm in self.alarms: - alarm.state = state - - def _assert_all_alarms(self, state): - for alarm in self.alarms: - self.assertEqual(state, alarm.state) diff --git a/aodh/tests/unit/evaluator/test_base.py b/aodh/tests/unit/evaluator/test_base.py deleted file mode 100644 index 4dd8771c..00000000 --- a/aodh/tests/unit/evaluator/test_base.py +++ /dev/null @@ -1,159 +0,0 @@ -# -# Copyright 2013 IBM Corp -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
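TestEvaluatorBase above isolates the evaluators by replacing every collaborator, the ceilometer client (via fixtures.MockPatch), the notifier, the storage connection, and the keystone client, with mocks. Reduced to the stdlib unittest.mock, and with all class and method names invented purely for illustration, the wiring pattern looks like this:

import unittest
from unittest import mock


class FakeEvaluator(object):
    """Stand-in wired the way TestEvaluatorBase wires a real evaluator."""

    def __init__(self):
        self.notifier = mock.MagicMock()
        self.storage_conn = mock.MagicMock()

    def refresh(self, alarm, state):
        # Mirrors the update-then-notify order the aodh tests assert.
        alarm.state = state
        self.storage_conn.update_alarm(alarm)
        self.notifier.notify(alarm, 'previous-state', 'reason', {})


class TestWiring(unittest.TestCase):
    def test_refresh_updates_storage_then_notifies(self):
        ev = FakeEvaluator()
        alarm = mock.MagicMock()
        ev.refresh(alarm, 'alarm')
        self.assertEqual('alarm', alarm.state)
        ev.storage_conn.update_alarm.assert_called_once_with(alarm)
        self.assertTrue(ev.notifier.notify.called)


if __name__ == '__main__':
    unittest.main()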
-import datetime - -import mock -from oslo_utils import timeutils -from oslotest import base - -from aodh import evaluator -from aodh import queue - - -class TestEvaluatorBaseClass(base.BaseTestCase): - def setUp(self): - super(TestEvaluatorBaseClass, self).setUp() - self.called = False - - def _notify(self, alarm, previous, reason, details): - self.called = True - raise Exception('Boom!') - - @mock.patch.object(queue, 'AlarmNotifier') - def test_base_refresh(self, notifier): - notifier.notify = self._notify - - class EvaluatorSub(evaluator.Evaluator): - def evaluate(self, alarm): - pass - - ev = EvaluatorSub(mock.MagicMock()) - ev.notifier = notifier - ev.storage_conn = mock.MagicMock() - ev._record_change = mock.MagicMock() - ev._refresh(mock.MagicMock(), mock.MagicMock(), - mock.MagicMock(), mock.MagicMock()) - ev.storage_conn.update_alarm.assert_called_once_with(mock.ANY) - ev._record_change.assert_called_once_with(mock.ANY, mock.ANY) - self.assertTrue(self.called) - - @mock.patch.object(timeutils, 'utcnow') - def test_base_time_constraints(self, mock_utcnow): - alarm = mock.MagicMock() - alarm.time_constraints = [ - {'name': 'test', - 'description': 'test', - 'start': '0 11 * * *', # daily at 11:00 - 'duration': 10800, # 3 hours - 'timezone': ''}, - {'name': 'test2', - 'description': 'test', - 'start': '0 23 * * *', # daily at 23:00 - 'duration': 10800, # 3 hours - 'timezone': ''}, - ] - cls = evaluator.Evaluator - mock_utcnow.return_value = datetime.datetime(2014, 1, 1, 12, 0, 0) - self.assertTrue(cls.within_time_constraint(alarm)) - - mock_utcnow.return_value = datetime.datetime(2014, 1, 2, 1, 0, 0) - self.assertTrue(cls.within_time_constraint(alarm)) - - mock_utcnow.return_value = datetime.datetime(2014, 1, 2, 5, 0, 0) - self.assertFalse(cls.within_time_constraint(alarm)) - - @mock.patch.object(timeutils, 'utcnow') - def test_base_time_constraints_by_month(self, mock_utcnow): - alarm = mock.MagicMock() - alarm.time_constraints = [ - {'name': 'test', - 'description': 'test', - 'start': '0 11 31 1,3,5,7,8,10,12 *', # every 31st at 11:00 - 'duration': 10800, # 3 hours - 'timezone': ''}, - ] - cls = evaluator.Evaluator - mock_utcnow.return_value = datetime.datetime(2015, 3, 31, 11, 30, 0) - self.assertTrue(cls.within_time_constraint(alarm)) - - @mock.patch.object(timeutils, 'utcnow') - def test_base_time_constraints_complex(self, mock_utcnow): - alarm = mock.MagicMock() - alarm.time_constraints = [ - {'name': 'test', - 'description': 'test', - # Every consecutive 2 minutes (from the 3rd to the 57th) past - # every consecutive 2 hours (between 3:00 and 12:59) on every day. 
- 'start': '3-57/2 3-12/2 * * *', - 'duration': 30, - 'timezone': ''} - ] - cls = evaluator.Evaluator - - # test minutes inside - mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 3, 3, 0) - self.assertTrue(cls.within_time_constraint(alarm)) - mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 3, 31, 0) - self.assertTrue(cls.within_time_constraint(alarm)) - mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 3, 57, 0) - self.assertTrue(cls.within_time_constraint(alarm)) - - # test minutes outside - mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 3, 2, 0) - self.assertFalse(cls.within_time_constraint(alarm)) - mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 3, 4, 0) - self.assertFalse(cls.within_time_constraint(alarm)) - mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 3, 58, 0) - self.assertFalse(cls.within_time_constraint(alarm)) - - # test hours inside - mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 3, 31, 0) - self.assertTrue(cls.within_time_constraint(alarm)) - mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 5, 31, 0) - self.assertTrue(cls.within_time_constraint(alarm)) - mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 11, 31, 0) - self.assertTrue(cls.within_time_constraint(alarm)) - - # test hours outside - mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 1, 31, 0) - self.assertFalse(cls.within_time_constraint(alarm)) - mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 4, 31, 0) - self.assertFalse(cls.within_time_constraint(alarm)) - mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 12, 31, 0) - self.assertFalse(cls.within_time_constraint(alarm)) - - @mock.patch.object(timeutils, 'utcnow') - def test_base_time_constraints_timezone(self, mock_utcnow): - alarm = mock.MagicMock() - cls = evaluator.Evaluator - mock_utcnow.return_value = datetime.datetime(2014, 1, 1, 11, 0, 0) - - alarm.time_constraints = [ - {'name': 'test', - 'description': 'test', - 'start': '0 11 * * *', # daily at 11:00 - 'duration': 10800, # 3 hours - 'timezone': 'Europe/Ljubljana'} - ] - self.assertTrue(cls.within_time_constraint(alarm)) - - alarm.time_constraints = [ - {'name': 'test2', - 'description': 'test2', - 'start': '0 11 * * *', # daily at 11:00 - 'duration': 10800, # 3 hours - 'timezone': 'US/Eastern'} - ] - self.assertFalse(cls.within_time_constraint(alarm)) diff --git a/aodh/tests/unit/evaluator/test_composite.py b/aodh/tests/unit/evaluator/test_composite.py deleted file mode 100644 index ee67dc2b..00000000 --- a/aodh/tests/unit/evaluator/test_composite.py +++ /dev/null @@ -1,508 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
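The time-constraint cases above ('0 11 * * *' with a 10800-second duration, the stepped '3-57/2 3-12/2 * * *' form, and the timezone variants) all hinge on one check: find the most recent tick of the cron expression and ask whether "now" falls inside the duration window that follows it. Here is a hedged re-derivation using croniter, the library aodh depends on for this; the helper itself is illustrative and is not the project's within_time_constraint.

import datetime

from croniter import croniter
import pytz


def within_constraint(now_utc, start_cron, duration_seconds, tz_name=''):
    tz = pytz.timezone(tz_name) if tz_name else pytz.utc
    now = now_utc.replace(tzinfo=pytz.utc).astimezone(tz)
    # Most recent tick of the cron expression before "now" ...
    start = croniter(start_cron, now).get_prev(datetime.datetime)
    # ... then test whether "now" sits inside [start, start + duration].
    return start <= now <= start + datetime.timedelta(seconds=duration_seconds)


# Matches the expectations in test_base_time_constraints above:
print(within_constraint(datetime.datetime(2014, 1, 1, 12, 0),
                        '0 11 * * *', 10800))   # True  (11:00-14:00 window)
print(within_constraint(datetime.datetime(2014, 1, 2, 5, 0),
                        '0 23 * * *', 10800))   # False (23:00-02:00 window)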
-"""Tests for aodh/evaluator/composite.py -""" - -from ceilometerclient.v2 import statistics -import fixtures -import mock -from oslo_utils import timeutils -from oslo_utils import uuidutils -import six -from six import moves - -from aodh import evaluator -from aodh.evaluator import composite -from aodh.storage import models -from aodh.tests import constants -from aodh.tests.unit.evaluator import base - - -class BaseCompositeEvaluate(base.TestEvaluatorBase): - EVALUATOR = composite.CompositeEvaluator - - def setUp(self): - self.client = self.useFixture(fixtures.MockPatch( - 'aodh.evaluator.gnocchi.client' - )).mock.Client.return_value - super(BaseCompositeEvaluate, self).setUp() - - @staticmethod - def _get_stats(attr, value, count=1): - return statistics.Statistics(None, {attr: value, 'count': count}) - - @staticmethod - def _get_gnocchi_stats(granularity, values): - now = timeutils.utcnow_ts() - return [[six.text_type(now - len(values) * granularity), - granularity, value] for value in values] - - @staticmethod - def _reason(new_state, user_expression, causative_rules=(), - transition=True): - root_cause_rules = {} - for index, rule in causative_rules: - name = 'rule%s' % index - root_cause_rules.update({name: rule}) - description = {evaluator.ALARM: 'outside their threshold.', - evaluator.OK: 'inside their threshold.', - evaluator.UNKNOWN: 'state evaluated to unknown.'} - params = {'state': new_state, - 'expression': user_expression, - 'rules': ', '.join(sorted(six.iterkeys(root_cause_rules))), - 'description': description[new_state]} - reason_data = { - 'type': 'composite', - 'composition_form': user_expression} - reason_data.update(causative_rules=root_cause_rules) - if transition: - reason = ('Composite rule alarm with composition form: ' - '%(expression)s transition to %(state)s, due to ' - 'rules: %(rules)s %(description)s' % params) - else: - reason = ('Composite rule alarm with composition form: ' - '%(expression)s remaining as %(state)s, due to ' - 'rules: %(rules)s %(description)s' % params) - return reason, reason_data - - -class CompositeTest(BaseCompositeEvaluate): - sub_rule1 = { - "type": "threshold", - "meter_name": "cpu_util", - "evaluation_periods": 5, - "threshold": 0.8, - "query": [{ - "field": "metadata.metering.stack_id", - "value": "36b20eb3-d749-4964-a7d2-a71147cd8145", - "op": "eq" - }], - "statistic": "avg", - "period": 60, - "exclude_outliers": False, - "comparison_operator": "gt" - } - - sub_rule2 = { - "type": "threshold", - "meter_name": "disk.iops", - "evaluation_periods": 4, - "threshold": 200, - "query": [{ - "field": "metadata.metering.stack_id", - "value": "36b20eb3-d749-4964-a7d2-a71147cd8145", - "op": "eq" - }], - "statistic": "max", - "period": 60, - "exclude_outliers": False, - "comparison_operator": "gt" - } - - sub_rule3 = { - "type": "threshold", - "meter_name": "network.incoming.packets.rate", - "evaluation_periods": 3, - "threshold": 1000, - "query": [{ - "field": "metadata.metering.stack_id", - "value": "36b20eb3-d749-4964-a7d2-a71147cd8145", - "op": "eq" - }], - "statistic": "avg", - "period": 60, - "exclude_outliers": False, - "comparison_operator": "gt" - } - - sub_rule4 = { - "type": "gnocchi_resources_threshold", - 'comparison_operator': 'gt', - 'threshold': 80.0, - 'evaluation_periods': 5, - 'aggregation_method': 'mean', - 'granularity': 60, - 'metric': 'cpu_util', - 'resource_type': 'instance', - 'resource_id': 'my_instance', - } - - sub_rule5 = { - "type": "gnocchi_aggregation_by_metrics_threshold", - 'comparison_operator': 'le', - 
'threshold': 10.0, - 'evaluation_periods': 4, - 'aggregation_method': 'max', - 'granularity': 300, - 'metrics': ['0bb1604d-1193-4c0a-b4b8-74b170e35e83', - '9ddc209f-42f8-41e1-b8f1-8804f59c4053'] - } - - sub_rule6 = { - "type": "gnocchi_aggregation_by_resources_threshold", - 'comparison_operator': 'gt', - 'threshold': 80.0, - 'evaluation_periods': 6, - 'aggregation_method': 'mean', - 'granularity': 50, - 'metric': 'cpu_util', - 'resource_type': 'instance', - 'query': '{"=": {"server_group": "my_autoscaling_group"}}' - } - - def prepare_alarms(self): - self.alarms = [ - models.Alarm(name='alarm_threshold_nest', - description='alarm with sub rules nested combined', - type='composite', - enabled=True, - user_id='fake_user', - project_id='fake_project', - alarm_id=uuidutils.generate_uuid(), - state='insufficient data', - state_reason='insufficient data', - state_timestamp=constants.MIN_DATETIME, - timestamp=constants.MIN_DATETIME, - insufficient_data_actions=[], - ok_actions=[], - alarm_actions=[], - repeat_actions=False, - time_constraints=[], - rule={ - "or": [self.sub_rule1, - {"and": [self.sub_rule2, self.sub_rule3] - }] - }, - severity='critical'), - models.Alarm(name='alarm_threshold_or', - description='alarm on one of sub rules triggered', - type='composite', - enabled=True, - user_id='fake_user', - project_id='fake_project', - state='insufficient data', - state_reason='insufficient data', - state_timestamp=constants.MIN_DATETIME, - timestamp=constants.MIN_DATETIME, - insufficient_data_actions=[], - ok_actions=[], - alarm_actions=[], - repeat_actions=False, - alarm_id=uuidutils.generate_uuid(), - time_constraints=[], - rule={ - "or": [self.sub_rule1, self.sub_rule2, - self.sub_rule3] - }, - severity='critical' - ), - models.Alarm(name='alarm_threshold_and', - description='alarm on all the sub rules triggered', - type='composite', - enabled=True, - user_id='fake_user', - project_id='fake_project', - state='insufficient data', - state_reason='insufficient data', - state_timestamp=constants.MIN_DATETIME, - timestamp=constants.MIN_DATETIME, - insufficient_data_actions=[], - ok_actions=[], - alarm_actions=[], - repeat_actions=False, - alarm_id=uuidutils.generate_uuid(), - time_constraints=[], - rule={ - "and": [self.sub_rule1, self.sub_rule2, - self.sub_rule3] - }, - severity='critical' - ), - models.Alarm(name='alarm_multi_type_rules', - description='alarm with threshold and gnocchi rules', - type='composite', - enabled=True, - user_id='fake_user', - project_id='fake_project', - alarm_id=uuidutils.generate_uuid(), - state='insufficient data', - state_reason='insufficient data', - state_timestamp=constants.MIN_DATETIME, - timestamp=constants.MIN_DATETIME, - insufficient_data_actions=[], - ok_actions=[], - alarm_actions=[], - repeat_actions=False, - time_constraints=[], - rule={ - "and": [self.sub_rule2, self.sub_rule3, - {'or': [self.sub_rule1, self.sub_rule4, - self.sub_rule5, self.sub_rule6]}] - }, - severity='critical' - ), - ] - - def test_simple_insufficient(self): - self._set_all_alarms('ok') - self.api_client.statistics.list.return_value = [] - self.client.metric.aggregation.return_value = [] - self.client.metric.get_measures.return_value = [] - self._evaluate_all_alarms() - self._assert_all_alarms('insufficient data') - expected = [mock.call(alarm) for alarm in self.alarms] - update_calls = self.storage_conn.update_alarm.call_args_list - self.assertEqual(expected, update_calls) - expected = [mock.call(self.alarms[0], - 'ok', - *self._reason( - 'insufficient data', - '(rule1 or (rule2 
and rule3))', - ((1, self.sub_rule1), (2, self.sub_rule2), - (3, self.sub_rule3)))), - mock.call(self.alarms[1], - 'ok', - *self._reason( - 'insufficient data', - '(rule1 or rule2 or rule3)', - ((1, self.sub_rule1), (2, self.sub_rule2), - (3, self.sub_rule3)))), - mock.call(self.alarms[2], - 'ok', - *self._reason( - 'insufficient data', - '(rule1 and rule2 and rule3)', - ((1, self.sub_rule1), (2, self.sub_rule2), - (3, self.sub_rule3)))), - mock.call( - self.alarms[3], - 'ok', - *self._reason( - 'insufficient data', - '(rule1 and rule2 and (rule3 or rule4 or rule5 ' - 'or rule6))', - ((1, self.sub_rule2), (2, self.sub_rule3), - (3, self.sub_rule1), (4, self.sub_rule4), - (5, self.sub_rule5), (6, self.sub_rule6))))] - self.assertEqual(expected, self.notifier.notify.call_args_list) - - def test_alarm_full_trip_with_multi_type_rules(self): - alarm = self.alarms[3] - alarm.state = 'ok' - # following results of sub-rules evaluation to trigger - # final "alarm" state: - # self.sub_rule2: alarm - # self.sub_rule3: alarm - # self.sub_rule1: ok - # self.sub_rule4: ok - # self.sub_rule5: ok - # self.sub_rule6: alarm - maxs = [self._get_stats('max', self.sub_rule2['threshold'] + 0.01 * v) - for v in moves.xrange(1, 5)] - avgs1 = [self._get_stats('avg', self.sub_rule3['threshold'] + 0.01 * v) - for v in moves.xrange(1, 4)] - avgs2 = [self._get_stats('avg', self.sub_rule1['threshold'] - 0.01 * v) - for v in moves.xrange(1, 6)] - - gavgs1 = self._get_gnocchi_stats(60, [self.sub_rule4['threshold'] - - v for v in moves.xrange(1, 6)]) - gmaxs = self._get_gnocchi_stats(300, [self.sub_rule5['threshold'] + v - for v in moves.xrange(1, 5)]) - gavgs2 = self._get_gnocchi_stats(50, [self.sub_rule6['threshold'] + v - for v in moves.xrange(1, 7)]) - - self.api_client.statistics.list.side_effect = [maxs, avgs1, avgs2] - self.client.metric.get_measures.side_effect = [gavgs1] - self.client.metric.aggregation.side_effect = [gmaxs, gavgs2] - self.evaluator.evaluate(alarm) - self.assertEqual(3, self.api_client.statistics.list.call_count) - self.assertEqual(1, self.client.metric.get_measures.call_count) - self.assertEqual(2, self.client.metric.aggregation.call_count) - self.assertEqual('alarm', alarm.state) - expected = mock.call( - alarm, 'ok', - *self._reason( - 'alarm', - '(rule1 and rule2 and (rule3 or rule4 or rule5 or rule6))', - ((1, self.sub_rule2), (2, self.sub_rule3), - (6, self.sub_rule6)))) - self.assertEqual(expected, self.notifier.notify.call_args) - - def test_alarm_with_short_circuit_logic(self): - alarm = self.alarms[1] - # self.sub_rule1: alarm - avgs = [self._get_stats('avg', self.sub_rule1['threshold'] + 0.01 * v) - for v in moves.xrange(1, 6)] - self.api_client.statistics.list.side_effect = [avgs] - self.evaluator.evaluate(alarm) - self.assertEqual('alarm', alarm.state) - self.assertEqual(1, self.api_client.statistics.list.call_count) - expected = mock.call(self.alarms[1], 'insufficient data', - *self._reason( - 'alarm', - '(rule1 or rule2 or rule3)', - ((1, self.sub_rule1),))) - self.assertEqual(expected, self.notifier.notify.call_args) - - def test_ok_with_short_circuit_logic(self): - alarm = self.alarms[2] - # self.sub_rule1: ok - avgs = [self._get_stats('avg', self.sub_rule1['threshold'] - 0.01 * v) - for v in moves.xrange(1, 6)] - self.api_client.statistics.list.side_effect = [avgs] - self.evaluator.evaluate(alarm) - self.assertEqual('ok', alarm.state) - self.assertEqual(1, self.api_client.statistics.list.call_count) - expected = mock.call(self.alarms[2], 'insufficient data', - *self._reason( - 
'ok', - '(rule1 and rule2 and rule3)', - ((1, self.sub_rule1),))) - self.assertEqual(expected, self.notifier.notify.call_args) - - def test_unknown_state_with_sub_rules_trending_state(self): - alarm = self.alarms[0] - maxs = [self._get_stats('max', self.sub_rule2['threshold'] + 0.01 * v) - for v in moves.xrange(-1, 4)] - avgs = [self._get_stats('avg', self.sub_rule3['threshold'] + 0.01 * v) - for v in moves.xrange(-1, 3)] - avgs2 = [self._get_stats('avg', self.sub_rule1['threshold'] - 0.01 * v) - for v in moves.xrange(1, 6)] - self.api_client.statistics.list.side_effect = [avgs2, maxs, avgs] - self.evaluator.evaluate(alarm) - self.assertEqual('alarm', alarm.state) - expected = mock.call(self.alarms[0], 'insufficient data', - *self._reason( - 'alarm', - '(rule1 or (rule2 and rule3))', - ((2, self.sub_rule2), - (3, self.sub_rule3)))) - self.assertEqual(expected, self.notifier.notify.call_args) - - def test_known_state_with_sub_rules_trending_state(self): - alarm = self.alarms[0] - alarm.repeat_actions = True - alarm.state = 'ok' - maxs = [self._get_stats('max', self.sub_rule2['threshold'] + 0.01 * v) - for v in moves.xrange(-1, 4)] - avgs = [self._get_stats('avg', self.sub_rule3['threshold'] + 0.01 * v) - for v in moves.xrange(-1, 3)] - avgs2 = [self._get_stats('avg', self.sub_rule1['threshold'] - 0.01 * v) - for v in moves.xrange(1, 6)] - self.api_client.statistics.list.side_effect = [avgs2, maxs, avgs] - self.evaluator.evaluate(alarm) - self.assertEqual('ok', alarm.state) - expected = mock.call(self.alarms[0], 'ok', - *self._reason( - 'ok', - '(rule1 or (rule2 and rule3))', - ((1, self.sub_rule1), - (2, self.sub_rule2), - (3, self.sub_rule3)), False)) - self.assertEqual(expected, self.notifier.notify.call_args) - - def test_known_state_with_sub_rules_trending_state_and_not_repeat(self): - alarm = self.alarms[2] - alarm.state = 'ok' - maxs = [self._get_stats('max', self.sub_rule2['threshold'] + 0.01 * v) - for v in moves.xrange(-1, 4)] - avgs = [self._get_stats('avg', self.sub_rule3['threshold'] + 0.01 * v) - for v in moves.xrange(-1, 3)] - avgs2 = [self._get_stats('avg', self.sub_rule1['threshold'] - 0.01 * v) - for v in moves.xrange(1, 6)] - self.api_client.statistics.list.side_effect = [avgs2, maxs, avgs] - self.evaluator.evaluate(alarm) - self.assertEqual('ok', alarm.state) - self.assertEqual([], self.notifier.notify.mock_calls) - - -class OtherCompositeTest(BaseCompositeEvaluate): - sub_rule1 = { - 'evaluation_periods': 3, - 'metric': 'radosgw.objects.containers', - 'resource_id': 'alarm-resource-1', - 'aggregation_method': 'mean', - 'granularity': 60, - 'threshold': 5.0, - 'type': 'gnocchi_resources_threshold', - 'comparison_operator': 'ge', - 'resource_type': 'ceph_account' - } - - sub_rule2 = { - 'evaluation_periods': 3, - 'metric': 'radosgw.objects.containers', - 'resource_id': 'alarm-resource-2', - 'aggregation_method': 'mean', - 'granularity': 60, - 'threshold': 5.0, - 'type': 'gnocchi_resources_threshold', - 'comparison_operator': 'ge', - 'resource_type': 'ceph_account' - } - - def prepare_alarms(self): - self.alarms = [ - models.Alarm(name='composite-GRT-OR-GRT', - description='composite alarm converted', - type='composite', - enabled=True, - user_id='fake_user', - project_id='fake_project', - state='insufficient data', - state_reason='insufficient data', - state_timestamp=constants.MIN_DATETIME, - timestamp=constants.MIN_DATETIME, - insufficient_data_actions=['log://'], - ok_actions=['log://'], - alarm_actions=['log://'], - repeat_actions=False, - 
alarm_id=uuidutils.generate_uuid(), - time_constraints=[], - rule={ - "or": [self.sub_rule1, self.sub_rule2] - }, - severity='critical' - ), - ] - - def test_simple_ok(self): - self._set_all_alarms('alarm') - - gavgs1 = [['2016-11-24T10:00:00+00:00', 3600.0, 3.0], - ['2016-11-24T10:00:00+00:00', 900.0, 3.0], - ['2016-11-24T10:00:00+00:00', 300.0, 3.0], - ['2016-11-24T10:01:00+00:00', 60.0, 2.0], - ['2016-11-24T10:02:00+00:00', 60.0, 3.0], - ['2016-11-24T10:03:00+00:00', 60.0, 4.0], - ['2016-11-24T10:04:00+00:00', 60.0, 5.0]] - - gavgs2 = [['2016-11-24T10:00:00+00:00', 3600.0, 3.0], - ['2016-11-24T10:00:00+00:00', 900.0, 3.0], - ['2016-11-24T10:00:00+00:00', 300.0, 3.0], - ['2016-11-24T10:01:00+00:00', 60.0, 2.0], - ['2016-11-24T10:02:00+00:00', 60.0, 3.0], - ['2016-11-24T10:03:00+00:00', 60.0, 4.0], - ['2016-11-24T10:04:00+00:00', 60.0, 5.0]] - - self.client.metric.get_measures.side_effect = [gavgs1, gavgs2] - self._evaluate_all_alarms() - self._assert_all_alarms('ok') - expected = [mock.call(alarm) for alarm in self.alarms] - update_calls = self.storage_conn.update_alarm.call_args_list - self.assertEqual(expected, update_calls) - expected = [mock.call(self.alarms[0], 'alarm', - *self._reason('ok', '(rule1 or rule2)', - ((1, self.sub_rule1), - (2, self.sub_rule2))))] - self.assertEqual(expected, self.notifier.notify.call_args_list) diff --git a/aodh/tests/unit/evaluator/test_event.py b/aodh/tests/unit/evaluator/test_event.py deleted file mode 100644 index df51f6a9..00000000 --- a/aodh/tests/unit/evaluator/test_event.py +++ /dev/null @@ -1,423 +0,0 @@ -# -# Copyright 2015 NEC Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
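Every composite case above reduces to folding the per-rule results through the nested and/or tree. A toy reduction follows, assuming each leaf has already been evaluated to a boolean; the real evaluator additionally handles the 'insufficient data' state and records which leaf rules caused the transition.

def evaluate_composite(rule, leaf_results):
    """Fold boolean leaf results through a nested and/or rule tree."""
    if 'and' in rule:
        return all(evaluate_composite(r, leaf_results) for r in rule['and'])
    if 'or' in rule:
        return any(evaluate_composite(r, leaf_results) for r in rule['or'])
    return leaf_results[rule['name']]


rule = {'or': [{'name': 'rule1'},
               {'and': [{'name': 'rule2'}, {'name': 'rule3'}]}]}
print(evaluate_composite(rule, {'rule1': False,
                                'rule2': True,
                                'rule3': True}))   # True -> 'alarm'

Because all() and any() short-circuit over generators, the remaining leaves are never fetched once one rule decides an 'or' (or falsifies an 'and'), which is exactly what test_alarm_with_short_circuit_logic and test_ok_with_short_circuit_logic assert via the statistics.list call counts.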
- -import copy -import datetime -import six - -import mock -from oslo_serialization import jsonutils -from oslo_utils import timeutils -from oslo_utils import uuidutils - -from aodh import evaluator -from aodh.evaluator import event as event_evaluator -from aodh.storage import models -from aodh.tests import constants -from aodh.tests.unit.evaluator import base - - -class TestEventAlarmEvaluate(base.TestEvaluatorBase): - EVALUATOR = event_evaluator.EventAlarmEvaluator - - @staticmethod - def _alarm(**kwargs): - alarm_id = kwargs.get('id') or uuidutils.generate_uuid() - return models.Alarm(name=kwargs.get('name', alarm_id), - type='event', - enabled=True, - alarm_id=alarm_id, - description='desc', - state=kwargs.get('state', 'insufficient data'), - state_reason='reason', - severity='critical', - state_timestamp=constants.MIN_DATETIME, - timestamp=constants.MIN_DATETIME, - ok_actions=[], - insufficient_data_actions=[], - alarm_actions=[], - repeat_actions=kwargs.get('repeat', False), - user_id='user', - project_id=kwargs.get('project', ''), - time_constraints=[], - rule=dict(event_type=kwargs.get('event_type', '*'), - query=kwargs.get('query', []))) - - @staticmethod - def _event(**kwargs): - return {'message_id': kwargs.get('id') or uuidutils.generate_uuid(), - 'event_type': kwargs.get('event_type', 'type0'), - 'traits': kwargs.get('traits', [])} - - def _setup_alarm_storage(self, alarms): - self._stored_alarms = {a.alarm_id: copy.deepcopy(a) for a in alarms} - self._update_history = [] - - def get_alarms(**kwargs): - return (a for a in six.itervalues(self._stored_alarms)) - - def update_alarm(alarm): - self._stored_alarms[alarm.alarm_id] = copy.deepcopy(alarm) - self._update_history.append(dict(alarm_id=alarm.alarm_id, - state=alarm.state)) - - self.storage_conn.get_alarms.side_effect = get_alarms - self.storage_conn.update_alarm.side_effect = update_alarm - - def _setup_alarm_notifier(self): - self._notification_history = [] - - def notify(alarm, previous, reason, data): - self._notification_history.append(dict(alarm_id=alarm.alarm_id, - state=alarm.state, - previous=previous, - reason=reason, - data=data)) - - self.notifier.notify.side_effect = notify - - def _do_test_event_alarm(self, alarms, events, - expect_db_queries=None, - expect_alarm_states=None, - expect_alarm_updates=None, - expect_notifications=None): - self._setup_alarm_storage(alarms) - self._setup_alarm_notifier() - - self.evaluator.evaluate_events(events) - - if expect_db_queries is not None: - expected = [mock.call(enabled=True, - alarm_type='event', - project=p) for p in expect_db_queries] - self.assertEqual(expected, - self.storage_conn.get_alarms.call_args_list) - - if expect_alarm_states is not None: - for alarm_id, state in six.iteritems(expect_alarm_states): - self.assertEqual(state, self._stored_alarms[alarm_id].state) - - if expect_alarm_updates is not None: - self.assertEqual(len(expect_alarm_updates), - len(self._update_history)) - for alarm, h in zip(expect_alarm_updates, self._update_history): - expected = dict(alarm_id=alarm.alarm_id, - state=evaluator.ALARM) - self.assertEqual(expected, h) - - if expect_notifications is not None: - self.assertEqual(len(expect_notifications), - len(self._notification_history)) - for n, h in zip(expect_notifications, self._notification_history): - alarm = n['alarm'] - event = n['event'] - previous = n.get('previous', evaluator.UNKNOWN) - reason = ('Event hits the ' - 'query .') % { - 'e': event['message_id'], - 'type': event['event_type'], - 'query': 
jsonutils.dumps(alarm.rule['query'], - sort_keys=True)} - data = {'type': 'event', 'event': event} - expected = dict(alarm_id=alarm.alarm_id, - state=evaluator.ALARM, - previous=previous, - reason=reason, - data=data) - self.assertEqual(expected, h) - - def test_fire_alarm_in_the_same_project_id(self): - alarm = self._alarm(project='project1') - event = self._event(traits=[['project_id', 1, 'project1']]) - self._do_test_event_alarm( - [alarm], [event], - expect_db_queries=['project1'], - expect_alarm_states={alarm.alarm_id: evaluator.ALARM}, - expect_alarm_updates=[alarm], - expect_notifications=[dict(alarm=alarm, event=event)]) - - def test_fire_alarm_in_the_same_tenant_id(self): - alarm = self._alarm(project='project1') - event = self._event(traits=[['tenant_id', 1, 'project1']]) - self._do_test_event_alarm( - [alarm], [event], - expect_db_queries=['project1'], - expect_alarm_states={alarm.alarm_id: evaluator.ALARM}, - expect_alarm_updates=[alarm], - expect_notifications=[dict(alarm=alarm, event=event)]) - - def test_fire_alarm_in_project_none(self): - alarm = self._alarm(project='') - event = self._event() - self._do_test_event_alarm( - [alarm], [event], - expect_db_queries=[''], - expect_alarm_states={alarm.alarm_id: evaluator.ALARM}, - expect_alarm_updates=[alarm], - expect_notifications=[dict(alarm=alarm, event=event)]) - - def test_continue_following_evaluation_after_exception(self): - alarms = [ - self._alarm(id=1), - self._alarm(id=2), - ] - event = self._event() - - original = self.evaluator._fire_alarm - - with mock.patch.object(event_evaluator.EventAlarmEvaluator, - '_fire_alarm') as _fire_alarm: - def _side_effect(*args, **kwargs): - _fire_alarm.side_effect = original - return Exception('boom') - - _fire_alarm.side_effect = _side_effect - - self._do_test_event_alarm( - alarms, [event], - expect_alarm_states={alarms[0].alarm_id: evaluator.UNKNOWN, - alarms[1].alarm_id: evaluator.ALARM}, - expect_alarm_updates=[alarms[1]], - expect_notifications=[dict(alarm=alarms[1], event=event)]) - - def test_skip_event_missing_event_type(self): - alarm = self._alarm() - event = {'message_id': uuidutils.generate_uuid(), 'traits': []} - self._do_test_event_alarm( - [alarm], [event], - expect_alarm_states={alarm.alarm_id: evaluator.UNKNOWN}, - expect_alarm_updates=[], - expect_notifications=[]) - - def test_skip_event_missing_message_id(self): - alarm = self._alarm() - event = {'event_type': 'type1', 'traits': []} - self._do_test_event_alarm( - [alarm], [event], - expect_alarm_states={alarm.alarm_id: evaluator.UNKNOWN}, - expect_alarm_updates=[], - expect_notifications=[]) - - def test_continue_alarming_when_repeat_actions_enabled(self): - alarm = self._alarm(repeat=True, state=evaluator.ALARM) - event = self._event() - self._do_test_event_alarm( - [alarm], [event], - expect_alarm_states={alarm.alarm_id: evaluator.ALARM}, - expect_alarm_updates=[alarm], - expect_notifications=[dict(alarm=alarm, event=event, - previous=evaluator.ALARM)]) - - def test_do_not_continue_alarming_when_repeat_actions_disabled(self): - alarm = self._alarm(repeat=False, state=evaluator.ALARM) - event = self._event() - self._do_test_event_alarm( - [alarm], [event], - expect_alarm_states={alarm.alarm_id: evaluator.ALARM}, - expect_alarm_updates=[], - expect_notifications=[]) - - def test_skip_uninterested_event_type(self): - alarm = self._alarm(event_type='compute.instance.exists') - event = self._event(event_type='compute.instance.update') - self._do_test_event_alarm( - [alarm], [event], - 
expect_alarm_states={alarm.alarm_id: evaluator.UNKNOWN}, - expect_alarm_updates=[], - expect_notifications=[]) - - def test_fire_alarm_event_type_pattern_matched(self): - alarm = self._alarm(event_type='compute.instance.*') - event = self._event(event_type='compute.instance.update') - self._do_test_event_alarm( - [alarm], [event], - expect_alarm_states={alarm.alarm_id: evaluator.ALARM}, - expect_alarm_updates=[alarm], - expect_notifications=[dict(alarm=alarm, event=event)]) - - def test_skip_event_type_pattern_unmatched(self): - alarm = self._alarm(event_type='compute.instance.*') - event = self._event(event_type='dummy.compute.instance') - self._do_test_event_alarm( - [alarm], [event], - expect_alarm_states={alarm.alarm_id: evaluator.UNKNOWN}, - expect_alarm_updates=[], - expect_notifications=[]) - - def test_fire_alarm_query_matched_string(self): - alarm = self._alarm(query=[dict(field="traits.state", - value="stopped", - op="eq")]) - event = self._event(traits=[['state', 1, 'stopped']]) - self._do_test_event_alarm( - [alarm], [event], - expect_alarm_states={alarm.alarm_id: evaluator.ALARM}, - expect_alarm_updates=[alarm], - expect_notifications=[dict(alarm=alarm, event=event)]) - - def test_skip_query_unmatched_string(self): - alarm = self._alarm(query=[dict(field="traits.state", - value="stopped", - op="eq")]) - event = self._event(traits=[['state', 1, 'active']]) - self._do_test_event_alarm( - [alarm], [event], - expect_alarm_states={alarm.alarm_id: evaluator.UNKNOWN}, - expect_alarm_updates=[], - expect_notifications=[]) - - def test_fire_alarm_query_matched_integer(self): - alarm = self._alarm(query=[dict(field="traits.instance_type_id", - type="integer", - value="5", - op="eq")]) - event = self._event(traits=[['instance_type_id', 2, 5]]) - self._do_test_event_alarm( - [alarm], [event], - expect_alarm_states={alarm.alarm_id: evaluator.ALARM}, - expect_alarm_updates=[alarm], - expect_notifications=[dict(alarm=alarm, event=event)]) - - def test_skip_query_unmatched_integer(self): - alarm = self._alarm(query=[dict(field="traits.instance_type_id", - type="integer", - value="5", - op="eq")]) - event = self._event(traits=[['instance_type_id', 2, 6]]) - self._do_test_event_alarm( - [alarm], [event], - expect_alarm_states={alarm.alarm_id: evaluator.UNKNOWN}, - expect_alarm_updates=[], - expect_notifications=[]) - - def test_fire_alarm_query_matched_float(self): - alarm = self._alarm(query=[dict(field="traits.io_read_kbs", - type="float", - value="123.456", - op="eq")]) - event = self._event(traits=[['io_read_kbs', 3, 123.456]]) - self._do_test_event_alarm( - [alarm], [event], - expect_alarm_states={alarm.alarm_id: evaluator.ALARM}, - expect_alarm_updates=[alarm], - expect_notifications=[dict(alarm=alarm, event=event)]) - - def test_skip_query_unmatched_float(self): - alarm = self._alarm(query=[dict(field="traits.io_read_kbs", - type="float", - value="123.456", - op="eq")]) - event = self._event(traits=[['io_read_kbs', 3, 456.123]]) - self._do_test_event_alarm( - [alarm], [event], - expect_alarm_states={alarm.alarm_id: evaluator.UNKNOWN}, - expect_alarm_updates=[], - expect_notifications=[]) - - def test_fire_alarm_query_matched_datetime(self): - alarm = self._alarm(query=[dict(field="traits.created_at", - type="datetime", - value="2015-09-01T18:52:27.214309", - op="eq")]) - event = self._event(traits=[['created_at', 4, - '2015-09-01T18:52:27.214309']]) - self._do_test_event_alarm( - [alarm], [event], - expect_alarm_states={alarm.alarm_id: evaluator.ALARM}, - expect_alarm_updates=[alarm], - 
expect_notifications=[dict(alarm=alarm, event=event)]) - - def test_skip_query_unmatched_datetime(self): - alarm = self._alarm(query=[dict(field="traits.created_at", - type="datetime", - value="2015-09-01T18:52:27.214309", - op="eq")]) - event = self._event(traits=[['created_at', 4, - '2015-09-02T18:52:27.214309']]) - self._do_test_event_alarm( - [alarm], [event], - expect_alarm_states={alarm.alarm_id: evaluator.UNKNOWN}, - expect_alarm_updates=[], - expect_notifications=[]) - - def test_skip_alarm_due_to_uncompareable_trait(self): - alarm = self._alarm(query=[dict(field="traits.created_at", - type="datetime", - value="2015-09-01T18:52:27.214309", - op="eq")]) - event = self._event(traits=[['created_at', 3, 123.456]]) - self._do_test_event_alarm( - [alarm], [event], - expect_alarm_states={alarm.alarm_id: evaluator.UNKNOWN}, - expect_alarm_updates=[], - expect_notifications=[]) - - def test_event_alarm_cache_hit(self): - alarm = self._alarm(project='project2', event_type='none') - events = [ - self._event(traits=[['project_id', 1, 'project2']]), - self._event(traits=[['project_id', 1, 'project2']]), - ] - self._do_test_event_alarm([alarm], events, - expect_db_queries=['project2']) - - def test_event_alarm_cache_updated_after_fired(self): - alarm = self._alarm(project='project2', event_type='type1', - repeat=False) - events = [ - self._event(event_type='type1', - traits=[['project_id', 1, 'project2']]), - self._event(event_type='type1', - traits=[['project_id', 1, 'project2']]), - ] - self._do_test_event_alarm( - [alarm], events, - expect_db_queries=['project2'], - expect_alarm_states={alarm.alarm_id: evaluator.ALARM}, - expect_alarm_updates=[alarm], - expect_notifications=[dict(alarm=alarm, event=events[0])]) - - def test_event_alarm_caching_disabled(self): - alarm = self._alarm(project='project2', event_type='none') - events = [ - self._event(traits=[['project_id', 1, 'project2']]), - self._event(traits=[['project_id', 1, 'project2']]), - ] - self.evaluator.conf.event_alarm_cache_ttl = 0 - self._do_test_event_alarm([alarm], events, - expect_db_queries=['project2', 'project2']) - - @mock.patch.object(timeutils, 'utcnow') - def test_event_alarm_cache_expired(self, mock_utcnow): - alarm = self._alarm(project='project2', event_type='none') - events = [ - self._event(traits=[['project_id', 1, 'project2']]), - self._event(traits=[['project_id', 1, 'project2']]), - ] - mock_utcnow.side_effect = [ - datetime.datetime(2015, 1, 1, 0, 0, 0), - datetime.datetime(2015, 1, 1, 1, 0, 0), - datetime.datetime(2015, 1, 1, 1, 1, 0), - ] - self._do_test_event_alarm([alarm], events, - expect_db_queries=['project2', 'project2']) - - def test_event_alarm_cache_miss(self): - events = [ - self._event(traits=[['project_id', 1, 'project2']]), - self._event(traits=[['project_id', 1, 'project3']]), - ] - self._do_test_event_alarm([], events, - expect_db_queries=['project2', 'project3']) diff --git a/aodh/tests/unit/evaluator/test_gnocchi.py b/aodh/tests/unit/evaluator/test_gnocchi.py deleted file mode 100644 index a3122065..00000000 --- a/aodh/tests/unit/evaluator/test_gnocchi.py +++ /dev/null @@ -1,491 +0,0 @@ -# -# Copyright 2015 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
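The query-matching cases above (string, integer, float, and datetime traits) compare a "traits." field path against typed trait values. The following deliberately small stand-in handles only the 'eq' operator and coerces the query value to the trait's runtime type; its names and structure are illustrative and are not aodh internals.

def event_matches(event, query):
    """Return True if every query condition matches a trait (eq only)."""
    traits = {name: value for name, _dtype, value in event['traits']}
    for cond in query:
        field = cond['field'].split('.', 1)[1]  # drop the "traits." prefix
        if field not in traits:
            return False
        # Coerce the (string) query value to the trait's runtime type,
        # loosely mimicking the typed comparisons in the tests above.
        try:
            expected = type(traits[field])(cond['value'])
        except (TypeError, ValueError):
            return False  # incomparable trait, as in the datetime-vs-float test
        if traits[field] != expected:
            return False
    return True


event = {'event_type': 'compute.instance.update',
         'traits': [['state', 1, 'stopped'], ['instance_type_id', 2, 5]]}
print(event_matches(event, [{'field': 'traits.instance_type_id',
                             'value': '5', 'op': 'eq'}]))       # True
print(event_matches(event, [{'field': 'traits.state',
                             'value': 'active', 'op': 'eq'}]))  # False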
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime -import fixtures -import unittest - -from gnocchiclient import exceptions -import mock -from oslo_utils import timeutils -from oslo_utils import uuidutils -import pytz -import six -from six import moves - -from aodh.evaluator import gnocchi -from aodh.storage import models -from aodh.tests import constants -from aodh.tests.unit.evaluator import base - - -class TestGnocchiEvaluatorBase(base.TestEvaluatorBase): - def setUp(self): - self.client = self.useFixture(fixtures.MockPatch( - 'aodh.evaluator.gnocchi.client' - )).mock.Client.return_value - self.prepared_alarms = [ - models.Alarm(name='instance_running_hot', - description='instance_running_hot', - type='gnocchi_resources_threshold', - enabled=True, - user_id='foobar', - project_id='snafu', - alarm_id=uuidutils.generate_uuid(), - state='insufficient data', - state_reason='insufficient data', - state_timestamp=constants.MIN_DATETIME, - timestamp=constants.MIN_DATETIME, - insufficient_data_actions=[], - ok_actions=[], - alarm_actions=[], - repeat_actions=False, - time_constraints=[], - rule=dict( - comparison_operator='gt', - threshold=80.0, - evaluation_periods=5, - aggregation_method='mean', - granularity=60, - metric='cpu_util', - resource_type='instance', - resource_id='my_instance') - ), - models.Alarm(name='group_running_idle', - description='group_running_idle', - type='gnocchi_aggregation_by_metrics_threshold', - enabled=True, - user_id='foobar', - project_id='snafu', - state='insufficient data', - state_reason='insufficient data', - state_timestamp=constants.MIN_DATETIME, - timestamp=constants.MIN_DATETIME, - insufficient_data_actions=[], - ok_actions=[], - alarm_actions=[], - repeat_actions=False, - alarm_id=uuidutils.generate_uuid(), - time_constraints=[], - rule=dict( - comparison_operator='le', - threshold=10.0, - evaluation_periods=4, - aggregation_method='max', - granularity=300, - metrics=['0bb1604d-1193-4c0a-b4b8-74b170e35e83', - '9ddc209f-42f8-41e1-b8f1-8804f59c4053']), - ), - models.Alarm(name='instance_not_running', - description='instance_running_hot', - type='gnocchi_aggregation_by_resources_threshold', - enabled=True, - user_id='foobar', - project_id='snafu', - alarm_id=uuidutils.generate_uuid(), - state='insufficient data', - state_reason='insufficient data', - state_timestamp=constants.MIN_DATETIME, - timestamp=constants.MIN_DATETIME, - insufficient_data_actions=[], - ok_actions=[], - alarm_actions=[], - repeat_actions=False, - time_constraints=[], - rule=dict( - comparison_operator='gt', - threshold=80.0, - evaluation_periods=6, - aggregation_method='mean', - granularity=50, - metric='cpu_util', - resource_type='instance', - query='{"=": {"server_group": ' - '"my_autoscaling_group"}}') - ), - - ] - super(TestGnocchiEvaluatorBase, self).setUp() - - @staticmethod - def _get_stats(granularity, values): - now = timeutils.utcnow_ts() - return [[six.text_type(now - len(values) * granularity), - granularity, value] for value in values] - - @staticmethod - def _reason_data(disposition, count, most_recent): - return {'type': 'threshold', 'disposition': disposition, - 'count': count, 
'most_recent': most_recent} - - def _set_all_rules(self, field, value): - for alarm in self.alarms: - alarm.rule[field] = value - - def _test_retry_transient(self): - self._evaluate_all_alarms() - self._assert_all_alarms('insufficient data') - self._evaluate_all_alarms() - self._assert_all_alarms('ok') - - def _test_simple_insufficient(self): - self._set_all_alarms('ok') - self._evaluate_all_alarms() - self._assert_all_alarms('insufficient data') - expected = [mock.call(alarm) for alarm in self.alarms] - update_calls = self.storage_conn.update_alarm.call_args_list - self.assertEqual(expected, update_calls) - expected = [mock.call( - alarm, - 'ok', - ('No datapoint for granularity %s' - % alarm.rule['granularity']), - self._reason_data('unknown', - alarm.rule['evaluation_periods'], - None)) - for alarm in self.alarms] - self.assertEqual(expected, self.notifier.notify.call_args_list) - - -class TestGnocchiResourceThresholdEvaluate(TestGnocchiEvaluatorBase): - EVALUATOR = gnocchi.GnocchiResourceThresholdEvaluator - - def prepare_alarms(self): - self.alarms = self.prepared_alarms[0:1] - - def test_retry_transient_api_failure(self): - means = self._get_stats(60, [self.alarms[0].rule['threshold'] - v - for v in moves.xrange(5)]) - self.client.metric.get_measures.side_effect = [ - exceptions.ClientException(501, "error2"), means] - self._test_retry_transient() - - def test_simple_insufficient(self): - self.client.metric.get_measures.return_value = [] - self._test_simple_insufficient() - - @mock.patch.object(timeutils, 'utcnow') - def test_simple_alarm_trip(self, utcnow): - utcnow.return_value = datetime.datetime(2015, 1, 26, 12, 57, 0, 0) - self._set_all_alarms('ok') - avgs = self._get_stats(60, [self.alarms[0].rule['threshold'] + v - for v in moves.xrange(1, 6)]) - self.client.metric.get_measures.side_effect = [avgs] - self._evaluate_all_alarms() - start_alarm = "2015-01-26T12:51:00" - end = "2015-01-26T12:57:00" - - self.assertEqual( - [mock.call.get_measures(aggregation='mean', metric='cpu_util', - resource_id='my_instance', - start=start_alarm, stop=end)], - self.client.metric.mock_calls) - - reason = ('Transition to alarm due to 5 samples outside threshold,' - ' most recent: %s' % avgs[-1][2]) - reason_data = self._reason_data('outside', 5, avgs[-1][2]) - expected = mock.call(self.alarms[0], 'ok', reason, reason_data) - self.assertEqual(expected, self.notifier.notify.call_args) - - def test_simple_alarm_clear(self): - self._set_all_alarms('alarm') - avgs = self._get_stats(60, [self.alarms[0].rule['threshold'] - v - for v in moves.xrange(5)]) - - self.client.metric.get_measures.side_effect = [avgs] - - self._evaluate_all_alarms() - self._assert_all_alarms('ok') - expected = [mock.call(alarm) for alarm in self.alarms] - update_calls = self.storage_conn.update_alarm.call_args_list - self.assertEqual(expected, update_calls) - - reason = ('Transition to ok due to 5 samples inside' - ' threshold, most recent: %s' % avgs[-1][2]) - - reason_data = self._reason_data('inside', 5, avgs[-1][2]) - expected = mock.call(self.alarms[0], 'alarm', reason, reason_data) - self.assertEqual(expected, self.notifier.notify.call_args) - - def test_equivocal_from_known_state_ok(self): - self._set_all_alarms('ok') - avgs = self._get_stats(60, [self.alarms[0].rule['threshold'] + v - for v in moves.xrange(5)]) - self.client.metric.get_measures.side_effect = [avgs] - - self._evaluate_all_alarms() - self._assert_all_alarms('ok') - self.assertEqual([], - self.storage_conn.update_alarm.call_args_list) - self.assertEqual([], 
self.notifier.notify.call_args_list) - - def test_state_change_and_repeat_actions(self): - self._set_all_alarms('ok') - self.alarms[0].repeat_actions = True - avgs = self._get_stats(60, [self.alarms[0].rule['threshold'] + v - for v in moves.xrange(1, 6)]) - - self.client.metric.get_measures.side_effect = [avgs] - - self._evaluate_all_alarms() - self._assert_all_alarms('alarm') - expected = [mock.call(alarm) for alarm in self.alarms] - update_calls = self.storage_conn.update_alarm.call_args_list - self.assertEqual(expected, update_calls) - - reason = ('Transition to alarm due to 5 samples outside ' - 'threshold, most recent: %s' % avgs[-1][2]) - reason_data = self._reason_data('outside', 5, avgs[-1][2]) - expected = mock.call(self.alarms[0], 'ok', reason, reason_data) - self.assertEqual(expected, self.notifier.notify.call_args) - - def test_equivocal_from_unknown(self): - self._set_all_alarms('insufficient data') - avgs = self._get_stats(60, [self.alarms[0].rule['threshold'] + v - for v in moves.xrange(1, 6)]) - - self.client.metric.get_measures.side_effect = [avgs] - - self._evaluate_all_alarms() - self._assert_all_alarms('alarm') - expected = [mock.call(alarm) for alarm in self.alarms] - update_calls = self.storage_conn.update_alarm.call_args_list - self.assertEqual(expected, update_calls) - - reason = ('Transition to alarm due to 5 samples outside' - ' threshold, most recent: %s' % avgs[-1][2]) - reason_data = self._reason_data('outside', 5, avgs[-1][2]) - expected = mock.call(self.alarms[0], 'insufficient data', - reason, reason_data) - self.assertEqual(expected, self.notifier.notify.call_args) - - @unittest.skipIf(six.PY3, - "the aodh base class is not python 3 ready") - @mock.patch.object(timeutils, 'utcnow') - def test_no_state_change_outside_time_constraint(self, mock_utcnow): - self._set_all_alarms('ok') - self.alarms[0].time_constraints = [ - {'name': 'test', - 'description': 'test', - 'start': '0 11 * * *', # daily at 11:00 - 'duration': 10800, # 3 hours - 'timezone': 'Europe/Ljubljana'} - ] - dt = datetime.datetime(2014, 1, 1, 15, 0, 0, - tzinfo=pytz.timezone('Europe/Ljubljana')) - mock_utcnow.return_value = dt.astimezone(pytz.UTC) - self.client.metric.get_measures.return_value = [] - self._evaluate_all_alarms() - self._assert_all_alarms('ok') - update_calls = self.storage_conn.update_alarm.call_args_list - self.assertEqual([], update_calls, - "Alarm should not change state if the current " - " time is outside its time constraint.") - self.assertEqual([], self.notifier.notify.call_args_list) - - -class TestGnocchiAggregationMetricsThresholdEvaluate(TestGnocchiEvaluatorBase): - EVALUATOR = gnocchi.GnocchiAggregationMetricsThresholdEvaluator - - def prepare_alarms(self): - self.alarms = self.prepared_alarms[1:2] - - def test_retry_transient_api_failure(self): - maxs = self._get_stats(300, [self.alarms[0].rule['threshold'] + v - for v in moves.xrange(4)]) - self.client.metric.aggregation.side_effect = [Exception('boom'), maxs] - self._test_retry_transient() - - def test_simple_insufficient(self): - self.client.metric.aggregation.return_value = [] - self._test_simple_insufficient() - - @mock.patch.object(timeutils, 'utcnow') - def test_simple_alarm_trip(self, utcnow): - utcnow.return_value = datetime.datetime(2015, 1, 26, 12, 57, 0, 0) - self._set_all_alarms('ok') - - maxs = self._get_stats(300, [self.alarms[0].rule['threshold'] - v - for v in moves.xrange(4)]) - self.client.metric.aggregation.side_effect = [maxs] - self._evaluate_all_alarms() - start_alarm = "2015-01-26T12:32:00" - 
end = "2015-01-26T12:57:00" - - self.assertEqual( - [mock.call.aggregation(aggregation='max', - metrics=[ - '0bb1604d-1193-4c0a-b4b8-74b170e35e83', - '9ddc209f-42f8-41e1-b8f1-8804f59c4053'], - needed_overlap=0, - start=start_alarm, stop=end)], - self.client.metric.mock_calls) - self._assert_all_alarms('alarm') - expected = [mock.call(alarm) for alarm in self.alarms] - update_calls = self.storage_conn.update_alarm.call_args_list - self.assertEqual(expected, update_calls) - reason = ('Transition to alarm due to 4 samples outside ' - 'threshold, most recent: %s' % maxs[-1][2]) - - reason_data = self._reason_data('outside', 4, maxs[-1][2]) - expected = mock.call(self.alarms[0], 'ok', reason, reason_data) - self.assertEqual(expected, self.notifier.notify.call_args) - - def test_simple_alarm_clear(self): - self._set_all_alarms('alarm') - maxs = self._get_stats(300, [self.alarms[0].rule['threshold'] + v - for v in moves.xrange(1, 5)]) - self.client.metric.aggregation.side_effect = [maxs] - self._evaluate_all_alarms() - self._assert_all_alarms('ok') - expected = [mock.call(alarm) for alarm in self.alarms] - update_calls = self.storage_conn.update_alarm.call_args_list - self.assertEqual(expected, update_calls) - reason = ('Transition to ok due to 4 samples inside ' - 'threshold, most recent: %s' % maxs[-1][2]) - reason_data = self._reason_data('inside', 4, maxs[-1][2]) - expected = mock.call(self.alarms[0], 'alarm', reason, reason_data) - - self.assertEqual(expected, self.notifier.notify.call_args) - - def test_equivocal_from_known_state_ok(self): - self._set_all_alarms('ok') - maxs = self._get_stats(300, [self.alarms[0].rule['threshold'] - v - for v in moves.xrange(-1, 3)]) - self.client.metric.aggregation.side_effect = [maxs] - self._evaluate_all_alarms() - self._assert_all_alarms('ok') - self.assertEqual( - [], - self.storage_conn.update_alarm.call_args_list) - self.assertEqual([], self.notifier.notify.call_args_list) - - def test_equivocal_ok_to_alarm(self): - self._set_all_alarms('ok') - # NOTE(sileht): we add one useless point (81.0) that will break - # the test if the evaluator doesn't remove it. 
- maxs = self._get_stats(300, [self.alarms[0].rule['threshold'] - v - for v in moves.xrange(-1, 5)]) - self.client.metric.aggregation.side_effect = [maxs] - self._evaluate_all_alarms() - self._assert_all_alarms('alarm') - - def test_equivocal_from_known_state_and_repeat_actions(self): - self._set_all_alarms('ok') - self.alarms[0].repeat_actions = True - maxs = self._get_stats(300, [self.alarms[0].rule['threshold'] - v - for v in moves.xrange(-1, 3)]) - self.client.metric.aggregation.side_effect = [maxs] - self._evaluate_all_alarms() - self._assert_all_alarms('ok') - self.assertEqual([], self.storage_conn.update_alarm.call_args_list) - reason = ('Remaining as ok due to 1 samples inside' - ' threshold, most recent: 8.0') - reason_datas = self._reason_data('inside', 1, 8.0) - expected = [mock.call(self.alarms[0], 'ok', reason, reason_datas)] - self.assertEqual(expected, self.notifier.notify.call_args_list) - - def test_unequivocal_from_known_state_and_repeat_actions(self): - self._set_all_alarms('alarm') - self.alarms[0].repeat_actions = True - - maxs = self._get_stats(300, [self.alarms[0].rule['threshold'] - v - for v in moves.xrange(4)]) - self.client.metric.aggregation.side_effect = [maxs] - self._evaluate_all_alarms() - self._assert_all_alarms('alarm') - self.assertEqual([], self.storage_conn.update_alarm.call_args_list) - reason = ('Remaining as alarm due to 4 samples outside' - ' threshold, most recent: 7.0') - reason_datas = self._reason_data('outside', 4, 7.0) - expected = [mock.call(self.alarms[0], 'alarm', - reason, reason_datas)] - self.assertEqual(expected, self.notifier.notify.call_args_list) - - -class TestGnocchiAggregationResourcesThresholdEvaluate( - TestGnocchiEvaluatorBase): - EVALUATOR = gnocchi.GnocchiAggregationResourcesThresholdEvaluator - - def prepare_alarms(self): - self.alarms = self.prepared_alarms[2:3] - - def test_retry_transient_api_failure(self): - avgs2 = self._get_stats(50, [self.alarms[0].rule['threshold'] - v - for v in moves.xrange(6)]) - self.client.metric.aggregation.side_effect = [ - exceptions.ClientException(500, "error"), avgs2] - self._test_retry_transient() - - def test_simple_insufficient(self): - self.client.metric.aggregation.return_value = [] - self._test_simple_insufficient() - - @mock.patch.object(timeutils, 'utcnow') - def test_simple_alarm_trip(self, utcnow): - utcnow.return_value = datetime.datetime(2015, 1, 26, 12, 57, 0, 0) - self._set_all_alarms('ok') - avgs = self._get_stats(50, [self.alarms[0].rule['threshold'] + v - for v in moves.xrange(1, 7)]) - - self.client.metric.aggregation.side_effect = [avgs] - self._evaluate_all_alarms() - start_alarm = "2015-01-26T12:51:10" - end = "2015-01-26T12:57:00" - self.assertEqual( - [mock.call.aggregation(aggregation='mean', metrics='cpu_util', - needed_overlap=0, - query={"=": {"server_group": - "my_autoscaling_group"}}, - resource_type='instance', - start=start_alarm, stop=end)], - self.client.metric.mock_calls) - self._assert_all_alarms('alarm') - expected = [mock.call(alarm) for alarm in self.alarms] - update_calls = self.storage_conn.update_alarm.call_args_list - self.assertEqual(expected, update_calls) - reason = ('Transition to alarm due to 6 samples outside ' - 'threshold, most recent: %s' % avgs[-1][2]) - reason_data = self._reason_data('outside', 6, avgs[-1][2]) - expected = mock.call(self.alarms[0], 'ok', reason, reason_data) - self.assertEqual(expected, self.notifier.notify.call_args) - - def test_simple_alarm_clear(self): - self._set_all_alarms('alarm') - avgs = self._get_stats(50, 
[self.alarms[0].rule['threshold'] - v - for v in moves.xrange(6)]) - self.client.metric.aggregation.side_effect = [avgs] - self._evaluate_all_alarms() - self._assert_all_alarms('ok') - expected = [mock.call(alarm) for alarm in self.alarms] - update_calls = self.storage_conn.update_alarm.call_args_list - self.assertEqual(expected, update_calls) - reason = ('Transition to ok due to 6 samples inside ' - 'threshold, most recent: %s' % avgs[-1][2]) - reason_data = self._reason_data('inside', 6, avgs[-1][2]) - expected = mock.call(self.alarms[0], 'alarm', reason, reason_data) - self.assertEqual(expected, self.notifier.notify.call_args) - - def test_equivocal_from_known_state_ok(self): - self._set_all_alarms('ok') - avgs = self._get_stats(50, [self.alarms[0].rule['threshold'] + v - for v in moves.xrange(6)]) - self.client.metric.aggregation.side_effect = [avgs] - self._evaluate_all_alarms() - self._assert_all_alarms('ok') - self.assertEqual( - [], - self.storage_conn.update_alarm.call_args_list) - self.assertEqual([], self.notifier.notify.call_args_list) diff --git a/aodh/tests/unit/evaluator/test_threshold.py b/aodh/tests/unit/evaluator/test_threshold.py deleted file mode 100644 index 090dfce4..00000000 --- a/aodh/tests/unit/evaluator/test_threshold.py +++ /dev/null @@ -1,606 +0,0 @@ -# -# Copyright 2013 Red Hat, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
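The start/stop strings asserted in the gnocchi tests above, and the timestamp bounds checked by test_lag_configuration further below, follow one rule: query (evaluation_periods + 1) * granularity seconds of history back from "now", stretched by any configured additional_ingestion_lag. The sketch below (not aodh's code) verifies that arithmetic against the values these tests assert.

import datetime


def look_back_window(now, granularity, evaluation_periods, ingestion_lag=0):
    seconds = granularity * (evaluation_periods + 1) + ingestion_lag
    start = now - datetime.timedelta(seconds=seconds)
    return start.isoformat(), now.isoformat()


now = datetime.datetime(2015, 1, 26, 12, 57, 0)
print(look_back_window(now, 60, 5))   # ('2015-01-26T12:51:00', '2015-01-26T12:57:00')
print(look_back_window(now, 300, 4))  # ('2015-01-26T12:32:00', '2015-01-26T12:57:00')
print(look_back_window(now, 50, 6))   # ('2015-01-26T12:51:10', '2015-01-26T12:57:00')

# With additional_ingestion_lag=42 (as in test_lag_configuration), the
# lower bound slides back 42 more seconds: 10:39:00 -> 10:38:18.
now = datetime.datetime(2012, 7, 2, 10, 45, 0)
print(look_back_window(now, 60, 5, ingestion_lag=42)[0])  # 2012-07-02T10:38:18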
-"""Tests for aodh/evaluator/threshold.py -""" -import copy -import datetime -import json - -from ceilometerclient import exc -from ceilometerclient.v2 import statistics -import mock -from oslo_utils import timeutils -from oslo_utils import uuidutils -import pytz -from six import moves - -from aodh.evaluator import threshold -from aodh import messaging -from aodh.storage import models -from aodh.tests import constants -from aodh.tests.unit.evaluator import base - - -class TestEvaluate(base.TestEvaluatorBase): - EVALUATOR = threshold.ThresholdEvaluator - - def prepare_alarms(self): - self.alarms = [ - models.Alarm(name='instance_running_hot', - description='instance_running_hot', - type='threshold', - enabled=True, - user_id='foobar', - project_id='snafu', - alarm_id=uuidutils.generate_uuid(), - state='insufficient data', - state_timestamp=constants.MIN_DATETIME, - state_reason='Not evaluated', - timestamp=constants.MIN_DATETIME, - insufficient_data_actions=[], - ok_actions=[], - alarm_actions=[], - repeat_actions=False, - time_constraints=[], - rule=dict( - comparison_operator='gt', - threshold=80.0, - evaluation_periods=5, - statistic='avg', - period=60, - meter_name='cpu_util', - query=[{'field': 'meter', - 'op': 'eq', - 'value': 'cpu_util'}, - {'field': 'resource_id', - 'op': 'eq', - 'value': 'my_instance'}]), - severity='critical' - ), - models.Alarm(name='group_running_idle', - description='group_running_idle', - type='threshold', - enabled=True, - user_id='foobar', - project_id='snafu', - state='insufficient data', - state_timestamp=constants.MIN_DATETIME, - state_reason='Not evaluated', - timestamp=constants.MIN_DATETIME, - insufficient_data_actions=[], - ok_actions=[], - alarm_actions=[], - repeat_actions=False, - alarm_id=uuidutils.generate_uuid(), - time_constraints=[], - rule=dict( - comparison_operator='le', - threshold=10.0, - evaluation_periods=4, - statistic='max', - period=300, - meter_name='cpu_util', - query=[{'field': 'meter', - 'op': 'eq', - 'value': 'cpu_util'}, - {'field': 'metadata.user_metadata.AS', - 'op': 'eq', - 'value': 'my_group'}]), - severity='critical' - ), - ] - - @staticmethod - def _get_stat(attr, value, count=1): - return statistics.Statistics(None, {attr: value, 'count': count}) - - @staticmethod - def _reason_data(disposition, count, most_recent): - return {'type': 'threshold', 'disposition': disposition, - 'count': count, 'most_recent': most_recent} - - def _set_all_rules(self, field, value): - for alarm in self.alarms: - alarm.rule[field] = value - - def test_retry_transient_api_failure(self): - broken = exc.CommunicationError(message='broken') - avgs = [self._get_stat('avg', self.alarms[0].rule['threshold'] - v) - for v in moves.xrange(5)] - maxs = [self._get_stat('max', self.alarms[1].rule['threshold'] + v) - for v in moves.xrange(1, 5)] - self.api_client.statistics.list.side_effect = [broken, - broken, - avgs, - maxs] - self._evaluate_all_alarms() - self._assert_all_alarms('insufficient data') - self._evaluate_all_alarms() - self._assert_all_alarms('ok') - - def test_simple_insufficient(self): - self._set_all_alarms('ok') - self.api_client.statistics.list.return_value = [] - self._evaluate_all_alarms() - self._assert_all_alarms('insufficient data') - expected = [mock.call(alarm) for alarm in self.alarms] - update_calls = self.storage_conn.update_alarm.call_args_list - self.assertEqual(expected, update_calls) - expected = [mock.call( - alarm, - 'ok', - ('%d datapoints are unknown' - % alarm.rule['evaluation_periods']), - 
self._reason_data('unknown', - alarm.rule['evaluation_periods'], - None)) - for alarm in self.alarms] - self.assertEqual(expected, self.notifier.notify.call_args_list) - - def test_less_insufficient_data(self): - self._set_all_alarms('ok') - avgs = [self._get_stat('avg', self.alarms[0].rule['threshold'] - v) - for v in moves.xrange(4)] - maxs = [self._get_stat('max', self.alarms[1].rule['threshold'] - v) - for v in moves.xrange(1, 4)] - self.api_client.statistics.list.side_effect = [avgs, maxs] - self._evaluate_all_alarms() - self._assert_all_alarms('insufficient data') - expected = [mock.call(alarm) for alarm in self.alarms] - update_calls = self.storage_conn.update_alarm.call_args_list - self.assertEqual(update_calls, expected) - expected = [mock.call( - alarm, - 'ok', - ('%d datapoints are unknown' - % alarm.rule['evaluation_periods']), - self._reason_data('unknown', - alarm.rule['evaluation_periods'], - alarm.rule['threshold'] - 3)) - for alarm in self.alarms] - self.assertEqual(expected, self.notifier.notify.call_args_list) - - def test_simple_alarm_trip(self): - self._set_all_alarms('ok') - avgs = [self._get_stat('avg', self.alarms[0].rule['threshold'] + v) - for v in moves.xrange(1, 6)] - maxs = [self._get_stat('max', self.alarms[1].rule['threshold'] - v) - for v in moves.xrange(4)] - self.api_client.statistics.list.side_effect = [avgs, maxs] - self._evaluate_all_alarms() - self._assert_all_alarms('alarm') - expected = [mock.call(alarm) for alarm in self.alarms] - update_calls = self.storage_conn.update_alarm.call_args_list - self.assertEqual(expected, update_calls) - reasons = ['Transition to alarm due to 5 samples outside' - ' threshold, most recent: %s' % avgs[-1].avg, - 'Transition to alarm due to 4 samples outside' - ' threshold, most recent: %s' % maxs[-1].max] - reason_datas = [self._reason_data('outside', 5, avgs[-1].avg), - self._reason_data('outside', 4, maxs[-1].max)] - expected = [mock.call(alarm, 'ok', reason, reason_data) - for alarm, reason, reason_data - in zip(self.alarms, reasons, reason_datas)] - self.assertEqual(expected, self.notifier.notify.call_args_list) - - @mock.patch.object(timeutils, 'utcnow') - def test_lag_configuration(self, mock_utcnow): - mock_utcnow.return_value = datetime.datetime(2012, 7, 2, 10, 45) - self.api_client.statistics.list.side_effect = [] - - self._set_all_alarms('ok') - self._evaluate_all_alarms() - self._set_all_alarms('ok') - self.conf.set_override("additional_ingestion_lag", 42) - self._evaluate_all_alarms() - - self.assertEqual([ - mock.call( - meter_name='cpu_util', period=60, - q=[{'value': 'cpu_util', 'op': 'eq', 'field': 'meter'}, - {'value': 'my_instance', 'op': 'eq', - 'field': 'resource_id'}, - {'value': '2012-07-02T10:45:00', 'op': 'le', - 'field': 'timestamp'}, - {'value': '2012-07-02T10:39:00', 'op': 'ge', - 'field': 'timestamp'}]), - mock.call( - meter_name='cpu_util', period=300, - q=[{'value': 'cpu_util', 'op': 'eq', 'field': 'meter'}, - {'value': 'my_group', 'op': 'eq', - 'field': 'metadata.user_metadata.AS'}, - {'value': '2012-07-02T10:45:00', 'op': 'le', - 'field': 'timestamp'}, - {'value': '2012-07-02T10:20:00', 'op': 'ge', - 'field': 'timestamp'}]), - mock.call( - meter_name='cpu_util', period=60, - q=[{'value': 'cpu_util', 'op': 'eq', 'field': 'meter'}, - {'value': 'my_instance', 'op': 'eq', - 'field': 'resource_id'}, - {'value': '2012-07-02T10:45:00', 'op': 'le', - 'field': 'timestamp'}, - {'value': '2012-07-02T10:38:18', 'op': 'ge', - 'field': 'timestamp'}]), - mock.call( - meter_name='cpu_util', period=300, - 
q=[{'value': 'cpu_util', 'op': 'eq', 'field': 'meter'}, - {'value': 'my_group', 'op': 'eq', - 'field': 'metadata.user_metadata.AS'}, - {'value': '2012-07-02T10:45:00', 'op': 'le', - 'field': 'timestamp'}, - {'value': '2012-07-02T10:19:18', 'op': 'ge', - 'field': 'timestamp'}])], - self.api_client.statistics.list.mock_calls) - - def test_simple_alarm_clear(self): - self._set_all_alarms('alarm') - avgs = [self._get_stat('avg', self.alarms[0].rule['threshold'] - v) - for v in moves.xrange(5)] - maxs = [self._get_stat('max', self.alarms[1].rule['threshold'] + v) - for v in moves.xrange(1, 5)] - self.api_client.statistics.list.side_effect = [avgs, maxs] - self._evaluate_all_alarms() - self._assert_all_alarms('ok') - expected = [mock.call(alarm) for alarm in self.alarms] - update_calls = self.storage_conn.update_alarm.call_args_list - self.assertEqual(expected, update_calls) - reasons = ['Transition to ok due to 5 samples inside' - ' threshold, most recent: %s' % avgs[-1].avg, - 'Transition to ok due to 4 samples inside' - ' threshold, most recent: %s' % maxs[-1].max] - reason_datas = [self._reason_data('inside', 5, avgs[-1].avg), - self._reason_data('inside', 4, maxs[-1].max)] - expected = [mock.call(alarm, 'alarm', reason, reason_data) - for alarm, reason, reason_data - in zip(self.alarms, reasons, reason_datas)] - self.assertEqual(expected, self.notifier.notify.call_args_list) - - def _construct_payloads(self): - payloads = [] - reasons = ["Transition to alarm due to 5 samples outside threshold, " - "most recent: 85.0", - "Transition to alarm due to 4 samples outside threshold, " - "most recent: 7.0"] - for alarm in self.alarms: - num = self.alarms.index(alarm) - type = models.AlarmChange.STATE_TRANSITION - detail = json.dumps({'state': alarm.state, - 'transition_reason': reasons[num]}) - on_behalf_of = alarm.project_id - payload = dict( - event_id='fake_event_id_%s' % num, - alarm_id=alarm.alarm_id, - type=type, - detail=detail, - user_id='fake_user_id', - project_id='fake_project_id', - on_behalf_of=on_behalf_of, - timestamp=datetime.datetime(2015, 7, 26, 3, 33, 21, 876795)) - payloads.append(payload) - return payloads - - @mock.patch.object(uuidutils, 'generate_uuid') - @mock.patch.object(timeutils, 'utcnow') - @mock.patch.object(messaging, 'get_notifier') - def test_alarm_change_record(self, get_notifier, utcnow, mock_uuid): - # the context.RequestContext() method need to generate uuid, - # so we need to provide 'fake_uuid_0' and 'fake_uuid_1' for that. 
- mock_uuid.side_effect = ['fake_event_id_0', 'fake_event_id_1'] - change_notifier = mock.MagicMock() - get_notifier.return_value = change_notifier - utcnow.return_value = datetime.datetime(2015, 7, 26, 3, 33, 21, 876795) - self._set_all_alarms('ok') - with mock.patch('ceilometerclient.client.get_client', - return_value=self.api_client): - avgs = [self._get_stat('avg', self.alarms[0].rule['threshold'] + v) - for v in moves.xrange(1, 6)] - maxs = [self._get_stat('max', self.alarms[1].rule['threshold'] - v) - for v in moves.xrange(4)] - self.api_client.statistics.list.side_effect = [avgs, maxs] - self._evaluate_all_alarms() - self._assert_all_alarms('alarm') - expected = [mock.call(alarm) for alarm in self.alarms] - update_calls = self.storage_conn.update_alarm.call_args_list - self.assertEqual(expected, update_calls) - payloads = self._construct_payloads() - expected_payloads = [mock.call(p) for p in payloads] - change_records = \ - self.storage_conn.record_alarm_change.call_args_list - self.assertEqual(expected_payloads, change_records) - notify_calls = change_notifier.info.call_args_list - notification = "alarm.state_transition" - expected_payloads = [mock.call(mock.ANY, notification, p) - for p in payloads] - self.assertEqual(expected_payloads, notify_calls) - - def test_equivocal_from_known_state(self): - self._set_all_alarms('ok') - avgs = [self._get_stat('avg', self.alarms[0].rule['threshold'] + v) - for v in moves.xrange(5)] - maxs = [self._get_stat('max', self.alarms[1].rule['threshold'] - v) - for v in moves.xrange(-1, 3)] - self.api_client.statistics.list.side_effect = [avgs, maxs] - self._evaluate_all_alarms() - self._assert_all_alarms('ok') - self.assertEqual( - [], - self.storage_conn.update_alarm.call_args_list) - self.assertEqual([], self.notifier.notify.call_args_list) - - def test_equivocal_from_known_state_and_repeat_actions(self): - self._set_all_alarms('ok') - self.alarms[1].repeat_actions = True - avgs = [self._get_stat('avg', - self.alarms[0].rule['threshold'] + v) - for v in moves.xrange(5)] - maxs = [self._get_stat('max', - self.alarms[1].rule['threshold'] - v) - for v in moves.xrange(-1, 3)] - self.api_client.statistics.list.side_effect = [avgs, maxs] - self._evaluate_all_alarms() - self._assert_all_alarms('ok') - self.assertEqual([], - self.storage_conn.update_alarm.call_args_list) - reason = ('Remaining as ok due to 1 samples inside' - ' threshold, most recent: 8.0') - reason_datas = self._reason_data('inside', 1, 8.0) - expected = [mock.call(self.alarms[1], 'ok', reason, reason_datas)] - self.assertEqual(expected, self.notifier.notify.call_args_list) - - def test_unequivocal_from_known_state_and_repeat_actions(self): - self._set_all_alarms('alarm') - self.alarms[1].repeat_actions = True - avgs = [self._get_stat('avg', - self.alarms[0].rule['threshold'] + v) - for v in moves.xrange(1, 6)] - maxs = [self._get_stat('max', - self.alarms[1].rule['threshold'] - v) - for v in moves.xrange(4)] - self.api_client.statistics.list.side_effect = [avgs, maxs] - self._evaluate_all_alarms() - self._assert_all_alarms('alarm') - self.assertEqual([], - self.storage_conn.update_alarm.call_args_list) - reason = ('Remaining as alarm due to 4 samples outside' - ' threshold, most recent: 7.0') - reason_datas = self._reason_data('outside', 4, 7.0) - expected = [mock.call(self.alarms[1], 'alarm', - reason, reason_datas)] - self.assertEqual(expected, self.notifier.notify.call_args_list) - - def test_state_change_and_repeat_actions(self): - self._set_all_alarms('ok') - 
self.alarms[0].repeat_actions = True - self.alarms[1].repeat_actions = True - avgs = [self._get_stat('avg', self.alarms[0].rule['threshold'] + v) - for v in moves.xrange(1, 6)] - maxs = [self._get_stat('max', self.alarms[1].rule['threshold'] - v) - for v in moves.xrange(4)] - self.api_client.statistics.list.side_effect = [avgs, maxs] - self._evaluate_all_alarms() - self._assert_all_alarms('alarm') - expected = [mock.call(alarm) for alarm in self.alarms] - update_calls = self.storage_conn.update_alarm.call_args_list - self.assertEqual(expected, update_calls) - reasons = ['Transition to alarm due to 5 samples outside' - ' threshold, most recent: %s' % avgs[-1].avg, - 'Transition to alarm due to 4 samples outside' - ' threshold, most recent: %s' % maxs[-1].max] - reason_datas = [self._reason_data('outside', 5, avgs[-1].avg), - self._reason_data('outside', 4, maxs[-1].max)] - expected = [mock.call(alarm, 'ok', reason, reason_data) - for alarm, reason, reason_data - in zip(self.alarms, reasons, reason_datas)] - self.assertEqual(expected, self.notifier.notify.call_args_list) - - def test_evaluation_keep_alarm_attributes_constant(self): - self._set_all_alarms('ok') - original_alarms = copy.deepcopy(self.alarms) - with mock.patch('ceilometerclient.client.get_client', - return_value=self.api_client): - avgs = [self._get_stat('avg', self.alarms[0].rule['threshold'] + v) - for v in moves.xrange(1, 6)] - maxs = [self._get_stat('max', self.alarms[1].rule['threshold'] - v) - for v in moves.xrange(4)] - self.api_client.statistics.list.side_effect = [avgs, maxs] - self._evaluate_all_alarms() - self._assert_all_alarms('alarm') - primitive_alarms = [a.as_dict() for a in self.alarms] - for alarm in original_alarms: - alarm.state = 'alarm' - alarm.state_reason = mock.ANY - primitive_original_alarms = [a.as_dict() for a in original_alarms] - self.assertEqual(primitive_original_alarms, primitive_alarms) - - def test_equivocal_from_unknown(self): - self._set_all_alarms('insufficient data') - avgs = [self._get_stat('avg', self.alarms[0].rule['threshold'] + v) - for v in moves.xrange(1, 6)] - maxs = [self._get_stat('max', self.alarms[1].rule['threshold'] - v) - for v in moves.xrange(-3, 1)] - self.api_client.statistics.list.side_effect = [avgs, maxs] - self._evaluate_all_alarms() - self._assert_all_alarms('alarm') - expected = [mock.call(alarm) for alarm in self.alarms] - update_calls = self.storage_conn.update_alarm.call_args_list - self.assertEqual(expected, update_calls) - reasons = ['Transition to alarm due to 5 samples outside' - ' threshold, most recent: %s' % avgs[-1].avg, - 'Transition to alarm due to 1 samples outside' - ' threshold, most recent: %s' % maxs[-1].max] - reason_datas = [self._reason_data('outside', 5, avgs[-1].avg), - self._reason_data('outside', 1, maxs[-1].max)] - expected = [mock.call(alarm, 'insufficient data', - reason, reason_data) - for alarm, reason, reason_data - in zip(self.alarms, reasons, reason_datas)] - self.assertEqual(expected, self.notifier.notify.call_args_list) - - def _do_test_bound_duration(self, start, exclude_outliers=None): - alarm = self.alarms[0] - if exclude_outliers is not None: - alarm.rule['exclude_outliers'] = exclude_outliers - with mock.patch.object(timeutils, 'utcnow') as mock_utcnow: - mock_utcnow.return_value = datetime.datetime(2012, 7, 2, 10, 45) - constraint = self.evaluator._bound_duration(alarm.rule) - self.assertEqual((start, timeutils.utcnow().isoformat()), - constraint) - - def test_bound_duration_outlier_exclusion_defaulted(self): - 
self._do_test_bound_duration('2012-07-02T10:39:00') - - def test_bound_duration_outlier_exclusion_clear(self): - self._do_test_bound_duration('2012-07-02T10:39:00', False) - - def test_bound_duration_outlier_exclusion_set(self): - self._do_test_bound_duration('2012-07-02T10:35:00', True) - - def _do_test_simple_alarm_trip_outlier_exclusion(self, exclude_outliers): - self._set_all_rules('exclude_outliers', exclude_outliers) - self._set_all_alarms('ok') - # most recent datapoints inside threshold but with - # anomalously low sample count - threshold = self.alarms[0].rule['threshold'] - avgs = [self._get_stat('avg', - threshold + (v if v < 10 else -v), - count=20 if v < 10 else 1) - for v in moves.xrange(1, 11)] - threshold = self.alarms[1].rule['threshold'] - maxs = [self._get_stat('max', - threshold - (v if v < 7 else -v), - count=20 if v < 7 else 1) - for v in moves.xrange(8)] - self.api_client.statistics.list.side_effect = [avgs, maxs] - self._evaluate_all_alarms() - self._assert_all_alarms('alarm' if exclude_outliers else 'ok') - if exclude_outliers: - expected = [mock.call(alarm) for alarm in self.alarms] - update_calls = self.storage_conn.update_alarm.call_args_list - self.assertEqual(expected, update_calls) - reasons = ['Transition to alarm due to 5 samples outside' - ' threshold, most recent: %s' % avgs[-2].avg, - 'Transition to alarm due to 4 samples outside' - ' threshold, most recent: %s' % maxs[-2].max] - reason_datas = [self._reason_data('outside', 5, avgs[-2].avg), - self._reason_data('outside', 4, maxs[-2].max)] - expected = [mock.call(alarm, 'ok', reason, reason_data) - for alarm, reason, reason_data - in zip(self.alarms, reasons, reason_datas)] - self.assertEqual(expected, self.notifier.notify.call_args_list) - - def test_simple_alarm_trip_with_outlier_exclusion(self): - self. _do_test_simple_alarm_trip_outlier_exclusion(True) - - def test_simple_alarm_no_trip_without_outlier_exclusion(self): - self. _do_test_simple_alarm_trip_outlier_exclusion(False) - - def _do_test_simple_alarm_clear_outlier_exclusion(self, exclude_outliers): - self._set_all_rules('exclude_outliers', exclude_outliers) - self._set_all_alarms('alarm') - # most recent datapoints outside threshold but with - # anomalously low sample count - threshold = self.alarms[0].rule['threshold'] - avgs = [self._get_stat('avg', - threshold - (v if v < 9 else -v), - count=20 if v < 9 else 1) - for v in moves.xrange(10)] - threshold = self.alarms[1].rule['threshold'] - maxs = [self._get_stat('max', - threshold + (v if v < 8 else -v), - count=20 if v < 8 else 1) - for v in moves.xrange(1, 9)] - self.api_client.statistics.list.side_effect = [avgs, maxs] - self._evaluate_all_alarms() - self._assert_all_alarms('ok' if exclude_outliers else 'alarm') - if exclude_outliers: - expected = [mock.call(alarm) for alarm in self.alarms] - update_calls = self.storage_conn.update_alarm.call_args_list - self.assertEqual(expected, update_calls) - reasons = ['Transition to ok due to 5 samples inside' - ' threshold, most recent: %s' % avgs[-2].avg, - 'Transition to ok due to 4 samples inside' - ' threshold, most recent: %s' % maxs[-2].max] - reason_datas = [self._reason_data('inside', 5, avgs[-2].avg), - self._reason_data('inside', 4, maxs[-2].max)] - expected = [mock.call(alarm, 'alarm', reason, reason_data) - for alarm, reason, reason_data - in zip(self.alarms, reasons, reason_datas)] - self.assertEqual(expected, self.notifier.notify.call_args_list) - - def test_simple_alarm_clear_with_outlier_exclusion(self): - self. 
_do_test_simple_alarm_clear_outlier_exclusion(True) - - def test_simple_alarm_no_clear_without_outlier_exclusion(self): - self. _do_test_simple_alarm_clear_outlier_exclusion(False) - - @mock.patch.object(timeutils, 'utcnow') - def test_state_change_inside_time_constraint(self, mock_utcnow): - self._set_all_alarms('ok') - self.alarms[0].time_constraints = [ - {'name': 'test', - 'description': 'test', - 'start': '0 11 * * *', # daily at 11:00 - 'duration': 10800, # 3 hours - 'timezone': 'Europe/Ljubljana'} - ] - self.alarms[1].time_constraints = self.alarms[0].time_constraints - dt = datetime.datetime(2014, 1, 1, 12, 0, 0, - tzinfo=pytz.timezone('Europe/Ljubljana')) - mock_utcnow.return_value = dt.astimezone(pytz.UTC) - with mock.patch('ceilometerclient.client.get_client', - return_value=self.api_client): - # the following part based on test_simple_insufficient - self.api_client.statistics.list.return_value = [] - self._evaluate_all_alarms() - self._assert_all_alarms('insufficient data') - expected = [mock.call(alarm) for alarm in self.alarms] - update_calls = self.storage_conn.update_alarm.call_args_list - self.assertEqual(expected, update_calls, - "Alarm should change state if the current " - "time is inside its time constraint.") - expected = [mock.call( - alarm, - 'ok', - ('%d datapoints are unknown' - % alarm.rule['evaluation_periods']), - self._reason_data('unknown', - alarm.rule['evaluation_periods'], - None)) - for alarm in self.alarms] - self.assertEqual(expected, self.notifier.notify.call_args_list) - - @mock.patch.object(timeutils, 'utcnow') - def test_no_state_change_outside_time_constraint(self, mock_utcnow): - self._set_all_alarms('ok') - self.alarms[0].time_constraints = [ - {'name': 'test', - 'description': 'test', - 'start': '0 11 * * *', # daily at 11:00 - 'duration': 10800, # 3 hours - 'timezone': 'Europe/Ljubljana'} - ] - self.alarms[1].time_constraints = self.alarms[0].time_constraints - dt = datetime.datetime(2014, 1, 1, 15, 0, 0, - tzinfo=pytz.timezone('Europe/Ljubljana')) - mock_utcnow.return_value = dt.astimezone(pytz.UTC) - self.api_client.statistics.list.return_value = [] - self._evaluate_all_alarms() - self._assert_all_alarms('ok') - update_calls = self.storage_conn.update_alarm.call_args_list - self.assertEqual([], update_calls, - "Alarm should not change state if the current " - " time is outside its time constraint.") - self.assertEqual([], self.notifier.notify.call_args_list) diff --git a/aodh/tests/unit/test_api_v2_capabilities.py b/aodh/tests/unit/test_api_v2_capabilities.py deleted file mode 100644 index 7dbe8766..00000000 --- a/aodh/tests/unit/test_api_v2_capabilities.py +++ /dev/null @@ -1,60 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
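# (Editorial aside -- a hedged sketch, not aodh's actual evaluator code, of
# the time-constraint check the two time-constraint tests above rely on: an
# alarm may only change state when "now", converted to the constraint's
# timezone, falls inside the window opened by the cron 'start' expression
# for 'duration' seconds. pytz appears in the test imports; croniter is an
# assumption here, and this exact helper is an illustration.)
import datetime

import croniter
import pytz


def inside_time_constraint(now_utc, start_cron, duration, tz_name):
    tz = pytz.timezone(tz_name)
    now_local = now_utc.astimezone(tz)
    # latest trigger of the cron expression at or before "now"
    window_start = croniter.croniter(start_cron, now_local).get_prev(
        datetime.datetime)
    window_end = window_start + datetime.timedelta(seconds=duration)
    return window_start <= now_local <= window_end

# With start='0 11 * * *' and duration=10800 (3 hours), 12:00 local time
# falls inside the window and 15:00 falls outside, matching the two tests.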
-from oslotest import base - -from aodh.api.controllers.v2 import capabilities - - -class TestCapabilities(base.BaseTestCase): - - def test_recursive_keypairs(self): - data = {'a': 'A', 'b': 'B', - 'nested': {'a': 'A', 'b': 'B'}} - pairs = list(capabilities._recursive_keypairs(data)) - self.assertEqual([('a', 'A'), ('b', 'B'), - ('nested:a', 'A'), ('nested:b', 'B')], - pairs) - - def test_recursive_keypairs_with_separator(self): - data = {'a': 'A', - 'b': 'B', - 'nested': {'a': 'A', - 'b': 'B', - }, - } - separator = '.' - pairs = list(capabilities._recursive_keypairs(data, separator)) - self.assertEqual([('a', 'A'), - ('b', 'B'), - ('nested.a', 'A'), - ('nested.b', 'B')], - pairs) - - def test_recursive_keypairs_with_list_of_dict(self): - small = 1 - big = 1 << 64 - expected = [('a', 'A'), - ('b', 'B'), - ('nested:list', [{small: 99, big: 42}])] - data = {'a': 'A', - 'b': 'B', - 'nested': {'list': [{small: 99, big: 42}]}} - pairs = list(capabilities._recursive_keypairs(data)) - self.assertEqual(len(expected), len(pairs)) - for k, v in pairs: - # the keys 1 and 1<<64 cause a hash collision on 64bit platforms - if k == 'nested:list': - self.assertIn(v, - [[{small: 99, big: 42}], - [{big: 42, small: 99}]]) - else: - self.assertIn((k, v), expected) diff --git a/aodh/tests/unit/test_bin.py b/aodh/tests/unit/test_bin.py deleted file mode 100644 index ce9e2511..00000000 --- a/aodh/tests/unit/test_bin.py +++ /dev/null @@ -1,109 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2012-2015 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
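# (Editorial aside -- a hedged sketch, not the real
# capabilities._recursive_keypairs: one implementation consistent with the
# capabilities tests deleted above. Nested dicts flatten into (key, value)
# pairs whose keys join each level with `separator`; non-dict values,
# lists included, are yielded untouched.)
def recursive_keypairs(d, separator=':', prefix=''):
    for name, value in sorted(d.items()):
        key = prefix + separator + name if prefix else name
        if isinstance(value, dict):
            for pair in recursive_keypairs(value, separator, key):
                yield pair
        else:
            yield key, value

# list(recursive_keypairs({'a': 'A', 'nested': {'b': 'B'}}))
# -> [('a', 'A'), ('nested:b', 'B')]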
- -import os -import subprocess - -from oslo_utils import fileutils -import six - -from aodh.tests import base - - -class BinTestCase(base.BaseTestCase): - def setUp(self): - super(BinTestCase, self).setUp() - content = ("[database]\n" - "connection=log://localhost\n") - if six.PY3: - content = content.encode('utf-8') - self.tempfile = fileutils.write_to_tempfile(content=content, - prefix='aodh', - suffix='.conf') - - def tearDown(self): - super(BinTestCase, self).tearDown() - os.remove(self.tempfile) - - def test_dbsync_run(self): - subp = subprocess.Popen(['aodh-dbsync', - "--config-file=%s" % self.tempfile]) - self.assertEqual(0, subp.wait()) - - def test_run_expirer_ttl_disabled(self): - subp = subprocess.Popen(['aodh-expirer', - '-d', - "--config-file=%s" % self.tempfile], - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - out, __ = subp.communicate() - self.assertEqual(0, subp.poll()) - self.assertIn(b"Nothing to clean, database alarm history " - b"time to live is disabled", out) - - def test_run_expirer_ttl_enabled(self): - content = ("[database]\n" - "alarm_history_time_to_live=1\n" - "connection=log://localhost\n") - if six.PY3: - content = content.encode('utf-8') - self.tempfile = fileutils.write_to_tempfile(content=content, - prefix='aodh', - suffix='.conf') - subp = subprocess.Popen(['aodh-expirer', - '-d', - "--config-file=%s" % self.tempfile], - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - out, __ = subp.communicate() - self.assertEqual(0, subp.poll()) - msg = "Dropping alarm history data with TTL 1" - if six.PY3: - msg = msg.encode('utf-8') - self.assertIn(msg, out) - - -class BinEvaluatorTestCase(base.BaseTestCase): - def setUp(self): - super(BinEvaluatorTestCase, self).setUp() - content = ("[database]\n" - "connection=log://localhost\n") - if six.PY3: - content = content.encode('utf-8') - self.tempfile = fileutils.write_to_tempfile(content=content, - prefix='aodh', - suffix='.conf') - self.subp = None - - def tearDown(self): - super(BinEvaluatorTestCase, self).tearDown() - if self.subp: - self.subp.kill() - os.remove(self.tempfile) - - def test_starting_evaluator(self): - self.subp = subprocess.Popen(['aodh-evaluator', - "--config-file=%s" % self.tempfile], - stderr=subprocess.PIPE) - self.assertIsNone(self.subp.poll()) - - -class BinNotifierTestCase(BinEvaluatorTestCase): - def test_starting_notifier(self): - self.subp = subprocess.Popen(['aodh-notifier', - "--config-file=%s" % self.tempfile], - stderr=subprocess.PIPE) - self.assertIsNone(self.subp.poll()) diff --git a/aodh/tests/unit/test_coordination.py b/aodh/tests/unit/test_coordination.py deleted file mode 100644 index 63dcadc8..00000000 --- a/aodh/tests/unit/test_coordination.py +++ /dev/null @@ -1,263 +0,0 @@ -# -# Copyright 2014 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock -from oslo_config import fixture as fixture_config -import tooz.coordination - -from aodh import coordination -from aodh import service -from aodh.tests import base - - -class MockToozCoordinator(object): - def __init__(self, member_id, shared_storage): - self._member_id = member_id - self._groups = shared_storage - self.is_started = False - - def start(self): - self.is_started = True - - def stop(self): - pass - - def heartbeat(self): - pass - - def create_group(self, group_id): - if group_id in self._groups: - return MockAsyncError( - tooz.coordination.GroupAlreadyExist(group_id)) - self._groups[group_id] = {} - return MockAsyncResult(None) - - def join_group(self, group_id, capabilities=b''): - if group_id not in self._groups: - return MockAsyncError( - tooz.coordination.GroupNotCreated(group_id)) - if self._member_id in self._groups[group_id]: - return MockAsyncError( - tooz.coordination.MemberAlreadyExist(group_id, - self._member_id)) - self._groups[group_id][self._member_id] = { - "capabilities": capabilities, - } - return MockAsyncResult(None) - - def leave_group(self, group_id): - return MockAsyncResult(None) - - def get_members(self, group_id): - if group_id not in self._groups: - return MockAsyncError( - tooz.coordination.GroupNotCreated(group_id)) - return MockAsyncResult(self._groups[group_id]) - - -class MockToozCoordExceptionRaiser(MockToozCoordinator): - def start(self): - raise tooz.coordination.ToozError('error') - - def heartbeat(self): - raise tooz.coordination.ToozError('error') - - def join_group(self, group_id, capabilities=b''): - raise tooz.coordination.ToozError('error') - - def get_members(self, group_id): - raise tooz.coordination.ToozError('error') - - -class MockAsyncResult(tooz.coordination.CoordAsyncResult): - def __init__(self, result): - self.result = result - - def get(self, timeout=0): - return self.result - - @staticmethod - def done(): - return True - - -class MockAsyncError(tooz.coordination.CoordAsyncResult): - def __init__(self, error): - self.error = error - - def get(self, timeout=0): - raise self.error - - @staticmethod - def done(): - return True - - -class TestHashRing(base.BaseTestCase): - def test_hash_ring(self): - num_nodes = 10 - num_keys = 1000 - - nodes = [str(x) for x in range(num_nodes)] - hr = coordination.HashRing(nodes) - - buckets = [0] * num_nodes - assignments = [-1] * num_keys - for k in range(num_keys): - n = int(hr.get_node(str(k))) - self.assertLessEqual(0, n) - self.assertLessEqual(n, num_nodes) - buckets[n] += 1 - assignments[k] = n - - # at least something in each bucket - self.assertTrue(all((c > 0 for c in buckets))) - - # approximately even distribution - diff = max(buckets) - min(buckets) - self.assertLess(diff, 0.3 * (num_keys / num_nodes)) - - # consistency - num_nodes += 1 - nodes.append(str(num_nodes + 1)) - hr = coordination.HashRing(nodes) - for k in range(num_keys): - n = int(hr.get_node(str(k))) - assignments[k] -= n - reassigned = len([c for c in assignments if c != 0]) - self.assertLess(reassigned, num_keys / num_nodes) - - -class TestPartitioning(base.BaseTestCase): - - def setUp(self): - super(TestPartitioning, self).setUp() - conf = service.prepare_service(argv=[], config_files=[]) - self.CONF = self.useFixture(fixture_config.Config(conf)).conf - self.shared_storage = {} - - def _get_new_started_coordinator(self, shared_storage, agent_id=None, - coordinator_cls=None): - coordinator_cls = coordinator_cls or MockToozCoordinator - self.CONF.set_override('backend_url', 'xxx://yyy', - 
group='coordination') - with mock.patch('tooz.coordination.get_coordinator', - lambda _, member_id: - coordinator_cls(member_id, shared_storage)): - pc = coordination.PartitionCoordinator(self.CONF, agent_id) - pc.start() - return pc - - def _usage_simulation(self, *agents_kwargs): - partition_coordinators = [] - for kwargs in agents_kwargs: - partition_coordinator = self._get_new_started_coordinator( - self.shared_storage, kwargs['agent_id'], kwargs.get( - 'coordinator_cls')) - partition_coordinator.join_group(kwargs['group_id']) - partition_coordinators.append(partition_coordinator) - - for i, kwargs in enumerate(agents_kwargs): - all_resources = kwargs.get('all_resources', []) - expected_resources = kwargs.get('expected_resources', []) - actual_resources = partition_coordinators[i].extract_my_subset( - kwargs['group_id'], all_resources) - self.assertEqual(expected_resources, actual_resources) - - def test_single_group(self): - agents = [dict(agent_id='agent1', group_id='group'), - dict(agent_id='agent2', group_id='group')] - self._usage_simulation(*agents) - - self.assertEqual(['group'], sorted(self.shared_storage.keys())) - self.assertEqual(['agent1', 'agent2'], - sorted(self.shared_storage['group'].keys())) - - def test_multiple_groups(self): - agents = [dict(agent_id='agent1', group_id='group1'), - dict(agent_id='agent2', group_id='group2')] - self._usage_simulation(*agents) - - self.assertEqual(['group1', 'group2'], - sorted(self.shared_storage.keys())) - - def test_partitioning(self): - all_resources = ['resource_%s' % i for i in range(1000)] - agents = ['agent_%s' % i for i in range(10)] - - expected_resources = [list() for _ in range(len(agents))] - hr = coordination.HashRing(agents) - for r in all_resources: - key = agents.index(hr.get_node(r)) - expected_resources[key].append(r) - - agents_kwargs = [] - for i, agent in enumerate(agents): - agents_kwargs.append(dict(agent_id=agent, - group_id='group', - all_resources=all_resources, - expected_resources=expected_resources[i])) - self._usage_simulation(*agents_kwargs) - - @mock.patch.object(coordination.LOG, 'exception') - def test_coordination_backend_offline(self, mocked_exception): - agents = [dict(agent_id='agent1', - group_id='group', - all_resources=['res1', 'res2'], - expected_resources=[], - coordinator_cls=MockToozCoordExceptionRaiser)] - self._usage_simulation(*agents) - called = [mock.call(u'Error connecting to coordination backend.'), - mock.call(u'Error getting group membership info from ' - u'coordination backend.')] - self.assertEqual(called, mocked_exception.call_args_list) - - @mock.patch.object(coordination.LOG, 'exception') - @mock.patch.object(coordination.LOG, 'info') - def test_reconnect(self, mock_info, mocked_exception): - coord = self._get_new_started_coordinator({}, 'a', - MockToozCoordExceptionRaiser) - with mock.patch('tooz.coordination.get_coordinator', - return_value=MockToozCoordExceptionRaiser('a', {})): - coord.heartbeat() - called = [mock.call(u'Error connecting to coordination backend.'), - mock.call(u'Error connecting to coordination backend.'), - mock.call(u'Error sending a heartbeat to coordination ' - u'backend.')] - self.assertEqual(called, mocked_exception.call_args_list) - with mock.patch('tooz.coordination.get_coordinator', - return_value=MockToozCoordinator('a', {})): - coord.heartbeat() - mock_info.assert_called_with(u'Coordination backend started ' - u'successfully.') - - def test_group_id_none(self): - coord = self._get_new_started_coordinator({}, 'a') - 
self.assertTrue(coord._coordinator.is_started) - - with mock.patch.object(coord._coordinator, 'join_group') as mocked: - coord.join_group(None) - self.assertEqual(0, mocked.call_count) - with mock.patch.object(coord._coordinator, 'leave_group') as mocked: - coord.leave_group(None) - self.assertEqual(0, mocked.call_count) - - def test_stop(self): - coord = self._get_new_started_coordinator({}, 'a') - self.assertTrue(coord._coordinator.is_started) - coord.join_group("123") - coord.stop() - self.assertIsEmpty(coord._groups) - self.assertIsNone(coord._coordinator) diff --git a/aodh/tests/unit/test_evaluator.py b/aodh/tests/unit/test_evaluator.py deleted file mode 100644 index 81420a6e..00000000 --- a/aodh/tests/unit/test_evaluator.py +++ /dev/null @@ -1,150 +0,0 @@ -# -# Copyright 2013 Red Hat, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Tests for aodh.evaluator.AlarmEvaluationService. -""" -import fixtures -import time - -import mock -from oslo_config import fixture as fixture_config -from stevedore import extension - -from aodh import evaluator -from aodh import service -from aodh.tests import base as tests_base - - -class TestAlarmEvaluationService(tests_base.BaseTestCase): - def setUp(self): - super(TestAlarmEvaluationService, self).setUp() - conf = service.prepare_service(argv=[], config_files=[]) - self.CONF = self.useFixture(fixture_config.Config(conf)).conf - self.CONF.set_override('workers', 1, 'evaluator') - self.setup_messaging(self.CONF) - - self.threshold_eval = mock.MagicMock() - self._fake_conn = mock.Mock() - self._fake_pc = mock.Mock() - self._fake_em = extension.ExtensionManager.make_test_instance( - [ - extension.Extension( - 'threshold', - None, - None, - self.threshold_eval), - ] - ) - - self.useFixture(fixtures.MockPatch( - 'stevedore.extension.ExtensionManager', - return_value=self._fake_em - )) - self.useFixture(fixtures.MockPatch( - 'aodh.coordination.PartitionCoordinator', - return_value=self._fake_pc - )) - self.useFixture(fixtures.MockPatch( - 'aodh.storage.get_connection_from_config', - return_value=self._fake_conn - )) - - def _do_test_start(self, test_interval=120, - coordination_heartbeat=1.0, - coordination_active=False): - - self.CONF.set_override('evaluation_interval', - test_interval) - self.CONF.set_override('heartbeat', - coordination_heartbeat, - group='coordination') - - self._fake_pc.is_active.return_value = coordination_active - - svc = evaluator.AlarmEvaluationService(0, self.CONF) - self.addCleanup(svc.terminate) - svc.terminate() - svc.partition_coordinator.start.assert_called_once_with() - svc.partition_coordinator.join_group.assert_called_once_with( - svc.PARTITIONING_GROUP_NAME) - - def test_start_singleton(self): - self._do_test_start(coordination_active=False) - - def test_start_coordinated(self): - self._do_test_start(coordination_active=True) - - def test_start_coordinated_high_hb_interval(self): - self._do_test_start(coordination_active=True, test_interval=10, - coordination_heartbeat=5) - - def test_evaluation_cycle(self): - alarm = 
mock.Mock(type='threshold', alarm_id="alarm_id1") - self._fake_pc.extract_my_subset.return_value = ["alarm_id1"] - self._fake_pc.is_active.return_value = False - self._fake_conn.get_alarms.return_value = [alarm] - self.threshold_eval.evaluate.side_effect = [Exception('Boom!'), None] - - svc = evaluator.AlarmEvaluationService(0, self.CONF) - self.addCleanup(svc.terminate) - time.sleep(1) - target = svc.partition_coordinator.extract_my_subset - target.assert_called_once_with(svc.PARTITIONING_GROUP_NAME, - ["alarm_id1"]) - self.threshold_eval.evaluate.assert_called_once_with(alarm) - - def test_evaluation_cycle_with_bad_alarm(self): - - alarms = [ - mock.Mock(type='threshold', name='bad', alarm_id='a'), - mock.Mock(type='threshold', name='good', alarm_id='b'), - ] - self.threshold_eval.evaluate.side_effect = [Exception('Boom!'), None] - - self._fake_pc.is_active.return_value = False - self._fake_pc.extract_my_subset.return_value = ['a', 'b'] - self._fake_conn.get_alarms.return_value = alarms - - svc = evaluator.AlarmEvaluationService(0, self.CONF) - self.addCleanup(svc.terminate) - time.sleep(1) - self.assertEqual([mock.call(alarms[0]), mock.call(alarms[1])], - self.threshold_eval.evaluate.call_args_list) - - def test_unknown_extension_skipped(self): - alarms = [ - mock.Mock(type='not_existing_type', alarm_id='a'), - mock.Mock(type='threshold', alarm_id='b') - ] - - self._fake_pc.is_active.return_value = False - self._fake_pc.extract_my_subset.return_value = ['a', 'b'] - self._fake_conn.get_alarms.return_value = alarms - - svc = evaluator.AlarmEvaluationService(0, self.CONF) - self.addCleanup(svc.terminate) - time.sleep(1) - self.threshold_eval.evaluate.assert_called_once_with(alarms[1]) - - def test_check_alarm_query_constraints(self): - self._fake_conn.get_alarms.return_value = [] - self._fake_pc.extract_my_subset.return_value = [] - self._fake_pc.is_active.return_value = False - - svc = evaluator.AlarmEvaluationService(0, self.CONF) - self.addCleanup(svc.terminate) - time.sleep(1) - expected = [({'enabled': True, 'exclude': {'type': 'event'}},)] - self.assertEqual(expected, - svc.storage_conn.get_alarms.call_args_list) diff --git a/aodh/tests/unit/test_event.py b/aodh/tests/unit/test_event.py deleted file mode 100644 index 2a85b973..00000000 --- a/aodh/tests/unit/test_event.py +++ /dev/null @@ -1,62 +0,0 @@ -# -# Copyright 2015 NEC Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock -import time - -from oslo_config import fixture as fixture_config -import oslo_messaging - -from aodh import event -from aodh import service -from aodh.tests import base as tests_base - - -class TestEventAlarmEvaluationService(tests_base.BaseTestCase): - - def setUp(self): - super(TestEventAlarmEvaluationService, self).setUp() - conf = service.prepare_service(argv=[], config_files=[]) - self.CONF = self.useFixture(fixture_config.Config(conf)).conf - self.CONF.set_override("batch_size", 2, 'listener') - self.setup_messaging(self.CONF) - - @mock.patch('aodh.storage.get_connection_from_config', - mock.MagicMock()) - @mock.patch('aodh.event.EventAlarmEndpoint.sample') - def test_batch_event_listener(self, mocked): - msg_notifier = oslo_messaging.Notifier( - self.transport, topics=['alarm.all'], driver='messaging', - publisher_id='test-publisher') - - received_events = [] - mocked.side_effect = lambda msg: received_events.append(msg) - event1 = {'event_type': 'compute.instance.update', - 'traits': ['foo', 'bar'], - 'message_id': '20d03d17-4aba-4900-a179-dba1281a3451', - 'generated': '2016-04-23T06:50:21.622739'} - event2 = {'event_type': 'compute.instance.update', - 'traits': ['foo', 'bar'], - 'message_id': '20d03d17-4aba-4900-a179-dba1281a3452', - 'generated': '2016-04-23T06:50:23.622739'} - msg_notifier.sample({}, 'event', event1) - msg_notifier.sample({}, 'event', event2) - - svc = event.EventAlarmEvaluationService(0, self.CONF) - self.addCleanup(svc.terminate) - - time.sleep(1) - self.assertEqual(1, len(received_events)) - self.assertEqual(2, len(received_events[0])) diff --git a/aodh/tests/unit/test_messaging.py b/aodh/tests/unit/test_messaging.py deleted file mode 100644 index 9f4be807..00000000 --- a/aodh/tests/unit/test_messaging.py +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright (C) 2014 eNovance SAS -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_config import fixture as fixture_config -import oslo_messaging.conffixture -from oslotest import base - -from aodh import messaging - - -class MessagingTests(base.BaseTestCase): - def setUp(self): - super(MessagingTests, self).setUp() - self.CONF = self.useFixture(fixture_config.Config()).conf - self.useFixture(oslo_messaging.conffixture.ConfFixture(self.CONF)) - - def test_get_transport_invalid_url(self): - self.assertRaises(oslo_messaging.InvalidTransportURL, - messaging.get_transport, self.CONF, "notvalid!") - - def test_get_transport_url_caching(self): - t1 = messaging.get_transport(self.CONF, 'fake://') - t2 = messaging.get_transport(self.CONF, 'fake://') - self.assertEqual(t1, t2) - - def test_get_transport_default_url_caching(self): - t1 = messaging.get_transport(self.CONF, ) - t2 = messaging.get_transport(self.CONF, ) - self.assertEqual(t1, t2) - - def test_get_transport_default_url_no_caching(self): - t1 = messaging.get_transport(self.CONF, cache=False) - t2 = messaging.get_transport(self.CONF, cache=False) - self.assertNotEqual(t1, t2) - - def test_get_transport_url_no_caching(self): - t1 = messaging.get_transport(self.CONF, 'fake://', cache=False) - t2 = messaging.get_transport(self.CONF, 'fake://', cache=False) - self.assertNotEqual(t1, t2) - - def test_get_transport_default_url_caching_mix(self): - t1 = messaging.get_transport(self.CONF, ) - t2 = messaging.get_transport(self.CONF, cache=False) - self.assertNotEqual(t1, t2) - - def test_get_transport_url_caching_mix(self): - t1 = messaging.get_transport(self.CONF, 'fake://') - t2 = messaging.get_transport(self.CONF, 'fake://', cache=False) - self.assertNotEqual(t1, t2) - - def test_get_transport_optional(self): - self.CONF.set_override('transport_url', 'non-url') - self.assertIsNone(messaging.get_transport(self.CONF, optional=True, - cache=False)) diff --git a/aodh/tests/unit/test_notifier.py b/aodh/tests/unit/test_notifier.py deleted file mode 100644 index 4291fc90..00000000 --- a/aodh/tests/unit/test_notifier.py +++ /dev/null @@ -1,459 +0,0 @@ -# -# Copyright 2013-2015 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
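# (Editorial aside -- a hedged sketch, not aodh's messaging module: the
# URL-keyed transport caching contract that MessagingTests above exercises.
# The _TRANSPORTS dict is an assumption; oslo_messaging.get_transport and
# InvalidTransportURL are the real oslo.messaging API.)
import oslo_messaging

_TRANSPORTS = {}


def get_transport(conf, url=None, optional=False, cache=True):
    if url in _TRANSPORTS and cache:
        return _TRANSPORTS[url]
    try:
        transport = oslo_messaging.get_transport(conf, url)
    except oslo_messaging.InvalidTransportURL:
        if not optional or url:
            # explicit URLs, and non-optional transports, must be valid
            raise
        return None
    if cache:
        _TRANSPORTS[url] = transport
    return transport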
-import fixtures -import time - -import mock -from oslo_config import fixture as fixture_config -import oslo_messaging -from oslo_serialization import jsonutils -import requests -import six.moves.urllib.parse as urlparse - -from aodh import notifier -from aodh import service -from aodh.tests import base as tests_base - - -DATA_JSON = jsonutils.loads( - '{"current": "ALARM", "alarm_id": "foobar", "alarm_name": "testalarm",' - ' "severity": "critical", "reason": "what ?",' - ' "reason_data": {"test": "test"}, "previous": "OK"}' -) -NOTIFICATION = dict(alarm_id='foobar', - alarm_name='testalarm', - severity='critical', - condition=dict(threshold=42), - reason='what ?', - reason_data={'test': 'test'}, - previous='OK', - current='ALARM') - - -class TestAlarmNotifierService(tests_base.BaseTestCase): - - def setUp(self): - super(TestAlarmNotifierService, self).setUp() - conf = service.prepare_service(argv=[], config_files=[]) - self.CONF = self.useFixture(fixture_config.Config(conf)).conf - self.setup_messaging(self.CONF) - - def test_init_host_queue(self): - self.service = notifier.AlarmNotifierService(0, self.CONF) - self.service.terminate() - - -class TestAlarmNotifier(tests_base.BaseTestCase): - def setUp(self): - super(TestAlarmNotifier, self).setUp() - conf = service.prepare_service(argv=[], config_files=[]) - self.CONF = self.useFixture(fixture_config.Config(conf)).conf - self.setup_messaging(self.CONF) - self._msg_notifier = oslo_messaging.Notifier( - self.transport, topics=['alarming'], driver='messaging', - publisher_id='testpublisher') - self.zaqar = FakeZaqarClient(self) - self.useFixture(fixtures.MockPatch( - 'aodh.notifier.zaqar.ZaqarAlarmNotifier.get_zaqar_client', - return_value=self.zaqar)) - self.service = notifier.AlarmNotifierService(0, self.CONF) - self.addCleanup(self.service.terminate) - - def test_notify_alarm(self): - data = { - 'actions': ['test://'], - 'alarm_id': 'foobar', - 'alarm_name': 'testalarm', - 'severity': 'critical', - 'previous': 'OK', - 'current': 'ALARM', - 'reason': 'Everything is on fire', - 'reason_data': {'fire': 'everywhere'} - } - self._msg_notifier.sample({}, 'alarm.update', data) - time.sleep(1) - notifications = self.service.notifiers['test'].obj.notifications - self.assertEqual(1, len(notifications)) - self.assertEqual((urlparse.urlsplit(data['actions'][0]), - data['alarm_id'], - data['alarm_name'], - data['severity'], - data['previous'], - data['current'], - data['reason'], - data['reason_data']), - notifications[0]) - - @mock.patch('aodh.notifier.LOG.debug') - def test_notify_alarm_with_batch_listener(self, logger): - data1 = { - 'actions': ['test://'], - 'alarm_id': 'foobar', - 'alarm_name': 'testalarm', - 'severity': 'critical', - 'previous': 'OK', - 'current': 'ALARM', - 'reason': 'Everything is on fire', - 'reason_data': {'fire': 'everywhere'} - } - data2 = { - 'actions': ['test://'], - 'alarm_id': 'foobar2', - 'alarm_name': 'testalarm2', - 'severity': 'low', - 'previous': 'ALARM', - 'current': 'OK', - 'reason': 'Everything is fine', - 'reason_data': {'fine': 'fine'} - } - self.service.terminate() - self.CONF.set_override("batch_size", 2, 'notifier') - # Init a new service with new configuration - self.svc = notifier.AlarmNotifierService(0, self.CONF) - self.addCleanup(self.svc.terminate) - self._msg_notifier.sample({}, 'alarm.update', data1) - self._msg_notifier.sample({}, 'alarm.update', data2) - time.sleep(1) - notifications = self.svc.notifiers['test'].obj.notifications - self.assertEqual(2, len(notifications)) - 
self.assertEqual((urlparse.urlsplit(data1['actions'][0]), - data1['alarm_id'], - data1['alarm_name'], - data1['severity'], - data1['previous'], - data1['current'], - data1['reason'], - data1['reason_data']), - notifications[0]) - self.assertEqual((urlparse.urlsplit(data2['actions'][0]), - data2['alarm_id'], - data2['alarm_name'], - data2['severity'], - data2['previous'], - data2['current'], - data2['reason'], - data2['reason_data']), - notifications[1]) - self.assertEqual(mock.call('Received %s messages in batch.', 2), - logger.call_args_list[0]) - - @staticmethod - def _notification(action): - notification = {} - notification.update(NOTIFICATION) - notification['actions'] = [action] - return notification - - @mock.patch('aodh.notifier.rest.LOG') - def test_notify_alarm_rest_action_ok(self, m_log): - action = 'http://host/action' - - with mock.patch.object(requests.Session, 'post') as poster: - self._msg_notifier.sample({}, - 'alarm.update', - self._notification(action)) - time.sleep(1) - poster.assert_called_with(action, data=mock.ANY, - headers=mock.ANY) - args, kwargs = poster.call_args - self.assertEqual( - { - 'x-openstack-request-id': - kwargs['headers']['x-openstack-request-id'], - 'content-type': 'application/json' - }, - kwargs['headers']) - self.assertEqual(DATA_JSON, jsonutils.loads(kwargs['data'])) - self.assertEqual(2, len(m_log.info.call_args_list)) - expected = mock.call('Notifying alarm <%(id)s> gets response: ' - '%(status_code)s %(reason)s.', - mock.ANY) - self.assertEqual(expected, m_log.info.call_args_list[1]) - - def test_notify_alarm_rest_action_with_ssl_client_cert(self): - action = 'https://host/action' - certificate = "/etc/ssl/cert/whatever.pem" - - self.CONF.set_override("rest_notifier_certificate_file", certificate) - - with mock.patch.object(requests.Session, 'post') as poster: - self._msg_notifier.sample({}, - 'alarm.update', - self._notification(action)) - time.sleep(1) - poster.assert_called_with(action, data=mock.ANY, - headers=mock.ANY, - cert=certificate, verify=True) - args, kwargs = poster.call_args - self.assertEqual( - { - 'x-openstack-request-id': - kwargs['headers']['x-openstack-request-id'], - 'content-type': 'application/json' - }, - kwargs['headers']) - self.assertEqual(DATA_JSON, jsonutils.loads(kwargs['data'])) - - def test_notify_alarm_rest_action_with_ssl_client_cert_and_key(self): - action = 'https://host/action' - certificate = "/etc/ssl/cert/whatever.pem" - key = "/etc/ssl/cert/whatever.key" - - self.CONF.set_override("rest_notifier_certificate_file", certificate) - self.CONF.set_override("rest_notifier_certificate_key", key) - - with mock.patch.object(requests.Session, 'post') as poster: - self._msg_notifier.sample({}, - 'alarm.update', - self._notification(action)) - time.sleep(1) - poster.assert_called_with(action, data=mock.ANY, - headers=mock.ANY, - cert=(certificate, key), verify=True) - args, kwargs = poster.call_args - self.assertEqual( - { - 'x-openstack-request-id': - kwargs['headers']['x-openstack-request-id'], - 'content-type': 'application/json'}, - kwargs['headers']) - self.assertEqual(DATA_JSON, jsonutils.loads(kwargs['data'])) - - def test_notify_alarm_rest_action_with_ssl_verify_disable_by_cfg(self): - action = 'https://host/action' - - self.CONF.set_override("rest_notifier_ssl_verify", False) - - with mock.patch.object(requests.Session, 'post') as poster: - self._msg_notifier.sample({}, - 'alarm.update', - self._notification(action)) - time.sleep(1) - poster.assert_called_with(action, data=mock.ANY, - headers=mock.ANY, - 
verify=False) - args, kwargs = poster.call_args - self.assertEqual( - { - 'x-openstack-request-id': - kwargs['headers']['x-openstack-request-id'], - 'content-type': 'application/json' - }, - kwargs['headers']) - self.assertEqual(DATA_JSON, jsonutils.loads(kwargs['data'])) - - def test_notify_alarm_rest_action_with_ssl_server_verify_enable(self): - action = 'https://host/action' - ca_bundle = "/path/to/custom_cert.pem" - - self.CONF.set_override("rest_notifier_ca_bundle_certificate_path", - ca_bundle) - - with mock.patch.object(requests.Session, 'post') as poster: - self._msg_notifier.sample({}, - 'alarm.update', - self._notification(action)) - time.sleep(1) - poster.assert_called_with(action, data=mock.ANY, - headers=mock.ANY, - verify=ca_bundle) - args, kwargs = poster.call_args - self.assertEqual(DATA_JSON, jsonutils.loads(kwargs['data'])) - - def test_notify_alarm_rest_action_with_ssl_verify_disable(self): - action = 'https://host/action?aodh-alarm-ssl-verify=0' - - with mock.patch.object(requests.Session, 'post') as poster: - self._msg_notifier.sample({}, - 'alarm.update', - self._notification(action)) - time.sleep(1) - poster.assert_called_with(action, data=mock.ANY, - headers=mock.ANY, - verify=False) - args, kwargs = poster.call_args - self.assertEqual( - { - 'x-openstack-request-id': - kwargs['headers']['x-openstack-request-id'], - 'content-type': 'application/json' - }, - kwargs['headers']) - self.assertEqual(DATA_JSON, jsonutils.loads(kwargs['data'])) - - def test_notify_alarm_rest_action_with_ssl_verify_enable_by_user(self): - action = 'https://host/action?aodh-alarm-ssl-verify=1' - - self.CONF.set_override("rest_notifier_ssl_verify", False) - - with mock.patch.object(requests.Session, 'post') as poster: - self._msg_notifier.sample({}, - 'alarm.update', - self._notification(action)) - time.sleep(1) - poster.assert_called_with(action, data=mock.ANY, - headers=mock.ANY, - verify=True) - args, kwargs = poster.call_args - self.assertEqual( - { - 'x-openstack-request-id': - kwargs['headers']['x-openstack-request-id'], - 'content-type': 'application/json' - }, - kwargs['headers']) - self.assertEqual(DATA_JSON, jsonutils.loads(kwargs['data'])) - - @staticmethod - def _fake_urlsplit(*args, **kwargs): - raise Exception("Evil urlsplit!") - - def test_notify_alarm_invalid_url(self): - with mock.patch('oslo_utils.netutils.urlsplit', - self._fake_urlsplit): - LOG = mock.MagicMock() - with mock.patch('aodh.notifier.LOG', LOG): - self._msg_notifier.sample( - {}, 'alarm.update', - { - 'actions': ['no-such-action-i-am-sure'], - 'alarm_id': 'foobar', - 'condition': {'threshold': 42}, - }) - time.sleep(1) - self.assertTrue(LOG.error.called) - - def test_notify_alarm_invalid_action(self): - LOG = mock.MagicMock() - with mock.patch('aodh.notifier.LOG', LOG): - self._msg_notifier.sample( - {}, 'alarm.update', - { - 'actions': ['no-such-action-i-am-sure://'], - 'alarm_id': 'foobar', - 'condition': {'threshold': 42}, - }) - time.sleep(1) - self.assertTrue(LOG.error.called) - - def test_notify_alarm_trust_action(self): - action = 'trust+http://trust-1234@host/action' - url = 'http://host/action' - - client = mock.MagicMock() - client.session.auth.get_access.return_value.auth_token = 'token_1234' - - self.useFixture( - fixtures.MockPatch('aodh.keystone_client.get_trusted_client', - lambda *args: client)) - - with mock.patch.object(requests.Session, 'post') as poster: - self._msg_notifier.sample({}, 'alarm.update', - self._notification(action)) - time.sleep(1) - poster.assert_called_with( - url, 
data=mock.ANY, headers=mock.ANY) - args, kwargs = poster.call_args - self.assertEqual( - { - 'X-Auth-Token': 'token_1234', - 'x-openstack-request-id': - kwargs['headers']['x-openstack-request-id'], - 'content-type': 'application/json' - }, - kwargs['headers']) - - self.assertEqual(DATA_JSON, jsonutils.loads(kwargs['data'])) - - def test_zaqar_notifier_action(self): - with mock.patch.object(notifier.zaqar.ZaqarAlarmNotifier, - '_get_client_conf') as get_conf: - action = ('zaqar://?topic=critical' - '&subscriber=http://example.com/data' - '&subscriber=mailto:foo@example.com&ttl=7200') - self._msg_notifier.sample({}, 'alarm.update', - self._notification(action)) - time.sleep(1) - get_conf.assert_called() - self.assertEqual(self.zaqar, - self.service.notifiers['zaqar'].obj._zclient) - self.assertEqual(2, self.zaqar.subscriptions) - self.assertEqual(1, self.zaqar.posts) - - def test_presigned_zaqar_notifier_action(self): - action = ('zaqar://?' - 'subscriber=http://example.com/data&ttl=7200' - '&signature=mysignature&expires=2016-06-29T01:49:56' - '&paths=/v2/queues/beijing/messages' - '&methods=GET,PATCH,POST,PUT&queue_name=foobar-critical' - '&project_id=my_project_id') - self._msg_notifier.sample({}, 'alarm.update', - self._notification(action)) - time.sleep(1) - self.assertEqual(1, self.zaqar.subscriptions) - self.assertEqual(1, self.zaqar.posts) - - def test_trust_zaqar_notifier_action(self): - client = mock.MagicMock() - client.session.auth.get_access.return_value.auth_token = 'token_1234' - - self.useFixture( - fixtures.MockPatch('aodh.keystone_client.get_trusted_client', - lambda *args: client)) - - action = 'trust+zaqar://trust-1234:delete@?queue_name=foobar-critical' - self._msg_notifier.sample({}, 'alarm.update', - self._notification(action)) - time.sleep(1) - self.assertEqual(0, self.zaqar.subscriptions) - self.assertEqual(1, self.zaqar.posts) - - -class FakeZaqarClient(object): - - def __init__(self, testcase): - self.testcase = testcase - self.subscriptions = 0 - self.posts = 0 - - def queue(self, queue_name, **kwargs): - self.testcase.assertEqual('foobar-critical', queue_name) - self.testcase.assertEqual({}, kwargs) - return FakeZaqarQueue(self) - - def subscription(self, queue_name, **kwargs): - self.testcase.assertEqual('foobar-critical', queue_name) - subscribers = ['http://example.com/data', 'mailto:foo@example.com'] - self.testcase.assertIn(kwargs['subscriber'], subscribers) - self.testcase.assertEqual(7200, kwargs['ttl']) - self.subscriptions += 1 - - -class FakeZaqarQueue(object): - - def __init__(self, client): - self.client = client - self.testcase = client.testcase - - def post(self, message): - expected_message = {'body': {'alarm_name': 'testalarm', - 'reason_data': {'test': 'test'}, - 'current': 'ALARM', - 'alarm_id': 'foobar', - 'reason': 'what ?', - 'severity': 'critical', - 'previous': 'OK'}} - self.testcase.assertEqual(expected_message, message) - self.client.posts += 1 diff --git a/babel.cfg b/babel.cfg deleted file mode 100644 index 15cd6cb7..00000000 --- a/babel.cfg +++ /dev/null @@ -1,2 +0,0 @@ -[python: **.py] - diff --git a/bindep.txt b/bindep.txt deleted file mode 100644 index 70f6bbe5..00000000 --- a/bindep.txt +++ /dev/null @@ -1,5 +0,0 @@ -libpq-dev [platform:dpkg] -postgresql [platform:dpkg] -mysql-client [platform:dpkg] -mysql-server [platform:dpkg] -gettext [platform:dpkg] diff --git a/devstack/README.rst b/devstack/README.rst deleted file mode 100644 index d5bf2276..00000000 --- a/devstack/README.rst +++ /dev/null @@ -1,25 +0,0 @@ 
-=========================
-Enabling Aodh in DevStack
-=========================
-
-1. Download DevStack::
-
-    git clone https://git.openstack.org/openstack-dev/devstack.git
-    cd devstack
-
-2. Add this repo as an external repository in the ``local.conf`` file::
-
-    [[local|localrc]]
-    enable_plugin aodh https://git.openstack.org/openstack/aodh
-
-   To use stable branches, make sure devstack is on that branch, and specify
-   the branch name to enable_plugin, for example::
-
-    enable_plugin aodh https://git.openstack.org/openstack/aodh stable/mitaka
-
-   There are some options, such as AODH_BACKEND, defined in
-   ``aodh/devstack/settings``; they can be used to configure the installation
-   of Aodh. If you don't want to use their default values, you can set new
-   ones in ``local.conf``.
-
-3. Run ``stack.sh``.
diff --git a/devstack/apache-aodh.template b/devstack/apache-aodh.template
deleted file mode 100644
index 039476ae..00000000
--- a/devstack/apache-aodh.template
+++ /dev/null
@@ -1,15 +0,0 @@
-Listen %PORT%
-
-<VirtualHost *:%PORT%>
-    WSGIDaemonProcess aodh-api processes=%APIWORKERS% threads=10 user=%USER% display-name=%{GROUP} %VIRTUALENV%
-    WSGIProcessGroup aodh-api
-    WSGIScriptAlias / %WSGIAPP%
-    WSGIApplicationGroup %{GLOBAL}
-    <IfVersion >= 2.4>
-        ErrorLogFormat "%{cu}t %M"
-    </IfVersion>
-    ErrorLog /var/log/%APACHE_NAME%/aodh.log
-    CustomLog /var/log/%APACHE_NAME%/aodh_access.log combined
-</VirtualHost>
-
-WSGISocketPrefix /var/run/%APACHE_NAME%
diff --git a/devstack/gate/gate_hook.sh b/devstack/gate/gate_hook.sh
deleted file mode 100755
index 002a592c..00000000
--- a/devstack/gate/gate_hook.sh
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/bin/bash
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This script is executed inside gate_hook function in devstack gate.
-
-ENABLED_SERVICES="key,aodh-api,aodh-notifier,aodh-evaluator,"
-ENABLED_SERVICES+="ceilometer-acompute,ceilometer-acentral,ceilometer-anotification,"
-ENABLED_SERVICES+="ceilometer-collector,ceilometer-api,"
-
-# The backend is passed in by the job as the first and only argument
-export AODH_BACKEND="${1}"
-export DEVSTACK_GATE_INSTALL_TESTONLY=1
-export DEVSTACK_GATE_NO_SERVICES=1
-export DEVSTACK_GATE_TEMPEST=0
-export DEVSTACK_GATE_EXERCISES=0
-export KEEP_LOCALRC=1
-
-# default to mysql
-case $AODH_BACKEND in
-    postgresql)
-        export DEVSTACK_GATE_POSTGRES=1
-        ;;
-esac
-
-DEVSTACK_LOCAL_CONFIG+=$'\n'"export AODH_BACKEND=${AODH_BACKEND}"
-
-export ENABLED_SERVICES
-
-$BASE/new/devstack-gate/devstack-vm-gate.sh
diff --git a/devstack/plugin.sh b/devstack/plugin.sh
deleted file mode 100644
index c145e852..00000000
--- a/devstack/plugin.sh
+++ /dev/null
@@ -1,337 +0,0 @@
-# Install and start **Aodh** service in devstack
-#
-# To enable Aodh in devstack add an entry to local.conf that
-# looks like
-#
-# [[local|localrc]]
-# enable_plugin aodh git://git.openstack.org/openstack/aodh
-#
-# By default all aodh services are started (see
-# devstack/settings).
-#
-# AODH_BACKEND:          Database backend (e.g. 'mysql')
-# AODH_COORDINATION_URL: URL for group membership service provided by tooz.
- -# Support potential entry-points console scripts in VENV or not -if [[ ${USE_VENV} = True ]]; then - PROJECT_VENV["aodh"]=${AODH_DIR}.venv - AODH_BIN_DIR=${PROJECT_VENV["aodh"]}/bin -else - AODH_BIN_DIR=$(get_python_exec_prefix) -fi - - -if [ -z "$AODH_DEPLOY" ]; then - # Default - AODH_DEPLOY=simple - - # Fallback to common wsgi devstack configuration - if [ "$ENABLE_HTTPD_MOD_WSGI_SERVICES" == "True" ]; then - AODH_DEPLOY=mod_wsgi - - # Deprecated config - elif [ -n "$AODH_USE_MOD_WSGI" ] ; then - echo_summary "AODH_USE_MOD_WSGI is deprecated, use AODH_DEPLOY instead" - if [ "$AODH_USE_MOD_WSGI" == True ]; then - AODH_DEPLOY=mod_wsgi - fi - fi -fi - -# Test if any Aodh services are enabled -# is_aodh_enabled -function is_aodh_enabled { - [[ ,${ENABLED_SERVICES} =~ ,"aodh-" ]] && return 0 - return 1 -} - -function aodh_service_url { - echo "$AODH_SERVICE_PROTOCOL://$AODH_SERVICE_HOST:$AODH_SERVICE_PORT" -} - - -# _install_redis() - Install the redis server and python lib. -function _aodh_install_redis { - if is_ubuntu; then - install_package redis-server - restart_service redis-server - else - # This will fail (correctly) where a redis package is unavailable - install_package redis - restart_service redis - fi - - pip_install_gr redis -} - -# Configure mod_wsgi -function _aodh_config_apache_wsgi { - sudo mkdir -p $AODH_WSGI_DIR - - local aodh_apache_conf=$(apache_site_config_for aodh) - local apache_version=$(get_apache_version) - local venv_path="" - - # Copy proxy vhost and wsgi file - sudo cp $AODH_DIR/aodh/api/app.wsgi $AODH_WSGI_DIR/app - - if [[ ${USE_VENV} = True ]]; then - venv_path="python-path=${PROJECT_VENV["aodh"]}/lib/$(python_version)/site-packages" - fi - - sudo cp $AODH_DIR/devstack/apache-aodh.template $aodh_apache_conf - sudo sed -e " - s|%PORT%|$AODH_SERVICE_PORT|g; - s|%APACHE_NAME%|$APACHE_NAME|g; - s|%WSGIAPP%|$AODH_WSGI_DIR/app|g; - s|%USER%|$STACK_USER|g; - s|%APIWORKERS%|$API_WORKERS|g; - s|%VIRTUALENV%|$venv_path|g - " -i $aodh_apache_conf -} - -# Install required services for coordination -function _aodh_prepare_coordination { - if echo $AODH_COORDINATION_URL | grep -q '^memcached:'; then - install_package memcached - elif echo $AODH_COORDINATION_URL | grep -q '^redis:'; then - _aodh_install_redis - fi -} - -# Create aodh related accounts in Keystone -function _aodh_create_accounts { - if is_service_enabled aodh-api; then - - create_service_user "aodh" "admin" - - local aodh_service=$(get_or_create_service "aodh" \ - "alarming" "OpenStack Alarming Service") - get_or_create_endpoint $aodh_service \ - "$REGION_NAME" \ - "$(aodh_service_url)" \ - "$(aodh_service_url)" \ - "$(aodh_service_url)" - fi -} - -# Activities to do before aodh has been installed. -function preinstall_aodh { - # Needed to build psycopg2 - if is_ubuntu; then - install_package libpq-dev - else - install_package postgresql-devel - fi -} - -# Remove WSGI files, disable and remove Apache vhost file -function _aodh_cleanup_apache_wsgi { - sudo rm -f $AODH_WSGI_DIR/* - sudo rm -f $(apache_site_config_for aodh) -} - -# cleanup_aodh() - Remove residual data files, anything left over -# from previous runs that a clean run would need to clean up -function cleanup_aodh { - if [ "$AODH_DEPLOY" == "mod_wsgi" ]; then - _aodh_cleanup_apache_wsgi - fi -} - -# Set configuration for storage backend. 
-function _aodh_configure_storage_backend { - if [ "$AODH_BACKEND" = 'mysql' ] || [ "$AODH_BACKEND" = 'postgresql' ] ; then - iniset $AODH_CONF database connection $(database_connection_url aodh) - else - die $LINENO "Unable to configure unknown AODH_BACKEND $AODH_BACKEND" - fi -} - -# Configure Aodh -function configure_aodh { - iniset_rpc_backend aodh $AODH_CONF - - iniset $AODH_CONF oslo_messaging_notifications topics "$AODH_NOTIFICATION_TOPICS" - iniset $AODH_CONF DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL" - - if [[ -n "$AODH_COORDINATION_URL" ]]; then - iniset $AODH_CONF coordination backend_url $AODH_COORDINATION_URL - fi - - # Set up logging - if [ "$SYSLOG" != "False" ]; then - iniset $AODH_CONF DEFAULT use_syslog "True" - fi - - # Format logging - if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ] && [ "$AODH_DEPLOY" != "mod_wsgi" ]; then - setup_colorized_logging $AODH_CONF DEFAULT - fi - - # The alarm evaluator needs these options to call gnocchi/ceilometer APIs - iniset $AODH_CONF service_credentials auth_type password - iniset $AODH_CONF service_credentials username aodh - iniset $AODH_CONF service_credentials user_domain_id default - iniset $AODH_CONF service_credentials project_domain_id default - iniset $AODH_CONF service_credentials password $SERVICE_PASSWORD - iniset $AODH_CONF service_credentials project_name $SERVICE_PROJECT_NAME - iniset $AODH_CONF service_credentials region_name $REGION_NAME - iniset $AODH_CONF service_credentials auth_url $KEYSTONE_SERVICE_URI - - configure_auth_token_middleware $AODH_CONF aodh $AODH_AUTH_CACHE_DIR - - # Configured storage - _aodh_configure_storage_backend - - # NOTE: This must come after database configuration as those can - # call cleanup_aodh which will wipe the WSGI config. - if [ "$AODH_DEPLOY" == "mod_wsgi" ]; then - _aodh_config_apache_wsgi - elif [ "$AODH_DEPLOY" == "uwsgi" ]; then - # iniset creates these files when it's called if they don't exist. - AODH_UWSGI_FILE=$AODH_CONF_DIR/aodh-uwsgi.ini - - rm -f "$AODH_UWSGI_FILE" - - iniset "$AODH_UWSGI_FILE" uwsgi http $AODH_SERVICE_HOST:$AODH_SERVICE_PORT - iniset "$AODH_UWSGI_FILE" uwsgi wsgi-file "$AODH_DIR/aodh/api/app.wsgi" - # This is running standalone - iniset "$AODH_UWSGI_FILE" uwsgi master true - # Set die-on-term & exit-on-reload so that uwsgi shuts down - iniset "$AODH_UWSGI_FILE" uwsgi die-on-term true - iniset "$AODH_UWSGI_FILE" uwsgi exit-on-reload true - iniset "$AODH_UWSGI_FILE" uwsgi threads 10 - iniset "$AODH_UWSGI_FILE" uwsgi processes $API_WORKERS - iniset "$AODH_UWSGI_FILE" uwsgi enable-threads true - iniset "$AODH_UWSGI_FILE" uwsgi plugins python - iniset "$AODH_UWSGI_FILE" uwsgi lazy-apps true - # uwsgi recommends this to prevent thundering herd on accept. - iniset "$AODH_UWSGI_FILE" uwsgi thunder-lock true - # Override the default size for headers from the 4k default. - iniset "$AODH_UWSGI_FILE" uwsgi buffer-size 65535 - # Make sure the client doesn't try to re-use the connection. - iniset "$AODH_UWSGI_FILE" uwsgi add-header "Connection: close" - fi - -} - -# init_aodh() - Initialize etc. -function init_aodh { - # Get aodh keystone settings in place - _aodh_create_accounts - # Create cache dir - sudo install -d -o $STACK_USER $AODH_AUTH_CACHE_DIR - rm -f $AODH_AUTH_CACHE_DIR/* - - if is_service_enabled mysql postgresql; then - if [ "$AODH_BACKEND" = 'mysql' ] || [ "$AODH_BACKEND" = 'postgresql' ] ; then - recreate_database aodh - $AODH_BIN_DIR/aodh-dbsync - fi - fi -} - -# Install Aodh. 
-# The storage and coordination backends are installed here because the
-# virtualenv context is active at this point and python drivers need to be
-# installed. The context is not active during preinstall (when it would
-# otherwise make sense to do the backend services).
-function install_aodh {
-    _aodh_prepare_coordination
-    install_aodhclient
-    if python3_enabled; then
-        PY_VERS=${PYTHON3_VERSION}
-    else
-        PY_VERS=${PYTHON2_VERSION}
-    fi
-    sudo -H python${PY_VERS} -m pip install -e "$AODH_DIR"[test,$AODH_BACKEND]
-    sudo install -d -o $STACK_USER -m 755 $AODH_CONF_DIR
-
-    if [ "$AODH_DEPLOY" == "mod_wsgi" ]; then
-        install_apache_wsgi
-    elif [ "$AODH_DEPLOY" == "uwsgi" ]; then
-        pip_install uwsgi
-    fi
-}
-
-# install_aodhclient() - Collect source and prepare
-function install_aodhclient {
-    if use_library_from_git "python-aodhclient"; then
-        git_clone_by_name "python-aodhclient"
-        setup_dev_lib "python-aodhclient"
-    else
-        pip_install_gr aodhclient
-    fi
-    aodh complete | sudo tee /etc/bash_completion.d/aodh.bash_completion > /dev/null
-}
-
-# start_aodh() - Start running processes, including screen
-function start_aodh {
-    if [[ "$AODH_DEPLOY" == "mod_wsgi" ]]; then
-        enable_apache_site aodh
-        restart_apache_server
-        tail_log aodh /var/log/$APACHE_NAME/aodh.log
-        tail_log aodh-api /var/log/$APACHE_NAME/aodh_access.log
-    elif [ "$AODH_DEPLOY" == "uwsgi" ]; then
-        run_process aodh-api "$AODH_BIN_DIR/uwsgi $AODH_UWSGI_FILE"
-    else
-        run_process aodh-api "$AODH_BIN_DIR/aodh-api -p $AODH_SERVICE_PORT"
-    fi
-
-    # Only die on API if it was actually intended to be turned on
-    if is_service_enabled aodh-api; then
-        echo "Waiting for aodh-api to start..."
-        if ! wait_for_service $SERVICE_TIMEOUT $(aodh_service_url)/v2/; then
-            die $LINENO "aodh-api did not start"
-        fi
-    fi
-
-    run_process aodh-notifier "$AODH_BIN_DIR/aodh-notifier --config-file $AODH_CONF"
-    run_process aodh-evaluator "$AODH_BIN_DIR/aodh-evaluator --config-file $AODH_CONF"
-    run_process aodh-listener "$AODH_BIN_DIR/aodh-listener --config-file $AODH_CONF"
-}
-
-# stop_aodh() - Stop running processes
-function stop_aodh {
-    if [ "$AODH_DEPLOY" == "mod_wsgi" ]; then
-        disable_apache_site aodh
-        restart_apache_server
-    fi
-    # Kill the aodh screen windows
-    for serv in aodh-api aodh-notifier aodh-evaluator aodh-listener; do
-        stop_process $serv
-    done
-}
-
-# This is the main for plugin.sh
-if is_service_enabled aodh; then
-    if [[ "$1" == "stack" && "$2" == "pre-install" ]]; then
-        # Set up other services
-        echo_summary "Configuring system services for Aodh"
-        preinstall_aodh
-    elif [[ "$1" == "stack" && "$2" == "install" ]]; then
-        echo_summary "Installing Aodh"
-        # Use stack_install_service here to account for virtualenv
-        stack_install_service aodh
-    elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
-        echo_summary "Configuring Aodh"
-        configure_aodh
-    elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
-        echo_summary "Initializing Aodh"
-        # Tidy base for aodh
-        init_aodh
-        # Start the services
-        start_aodh
-    fi
-
-    if [[ "$1" == "unstack" ]]; then
-        echo_summary "Shutting Down Aodh"
-        stop_aodh
-    fi
-
-    if [[ "$1" == "clean" ]]; then
-        echo_summary "Cleaning Aodh"
-        cleanup_aodh
-    fi
-fi
diff --git a/devstack/settings b/devstack/settings
deleted file mode 100644
index 99e7c3e1..00000000
--- a/devstack/settings
+++ /dev/null
@@ -1,45 +0,0 @@
-# turn on all the aodh services by default
-# API service
-enable_service aodh-api
-# Alarming
-enable_service aodh-notifier aodh-evaluator
-# Listener for Event Alarming
-enable_service aodh-listener
-
-# Default directories
-AODH_DIR=$DEST/aodh
-AODH_CONF_DIR=/etc/aodh
-AODH_CONF=$AODH_CONF_DIR/aodh.conf
-AODH_AUTH_CACHE_DIR=${AODH_AUTH_CACHE_DIR:-/var/cache/aodh}
-AODH_WSGI_DIR=${AODH_WSGI_DIR:-/var/www/aodh}
-
-# Set up database backend
-AODH_BACKEND=${AODH_BACKEND:-mysql}
-
-# Aodh connection info.
-AODH_SERVICE_PROTOCOL=http
-AODH_SERVICE_HOST=$SERVICE_HOST
-AODH_SERVICE_PORT=${AODH_SERVICE_PORT:-8042}
-
-# AODH_DEPLOY defines how Aodh is deployed, allowed values:
-# - mod_wsgi : Run Aodh under Apache HTTPd mod_wsgi
-# - simple   : Run aodh-api
-# - uwsgi    : Run Aodh under uwsgi
-# - <empty>  : Fallback to AODH_USE_MOD_WSGI or ENABLE_HTTPD_MOD_WSGI_SERVICES
-AODH_DEPLOY=${AODH_DEPLOY}
-
-AODH_NOTIFICATION_TOPICS=${AODH_NOTIFICATION_TOPICS:-notifications}
-
-AODH_COORDINATION_URL=${AODH_COORDINATION_URL:-}
-
-# Tell Tempest this project is present
-TEMPEST_SERVICES+=,aodh
-
-# Set up default directories for client
-GITDIR["python-aodhclient"]=$DEST/python-aodhclient
-
-# Get rid of this before done.
-# Tell emacs to use shell-script-mode
-## Local variables:
-## mode: shell-script
-## End:
diff --git a/doc/Makefile b/doc/Makefile
deleted file mode 100644
index 54a1a50d..00000000
--- a/doc/Makefile
+++ /dev/null
@@ -1,163 +0,0 @@
-# Makefile for Sphinx documentation
-#
-
-# You can set these variables from the command line.
-SPHINXOPTS    =
-SPHINXBUILD   = sphinx-build
-PAPER         =
-BUILDDIR      = build
-
-# Internal variables.
-PAPEROPT_a4     = -D latex_paper_size=a4
-PAPEROPT_letter = -D latex_paper_size=letter
-ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
-# the i18n builder cannot share the environment and doctrees with the others
-I18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
-
-.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
-
-help:
-	@echo "Please use \`make <target>' where <target> is one of"
-	@echo "  html       to make standalone HTML files"
-	@echo "  dirhtml    to make HTML files named index.html in directories"
-	@echo "  singlehtml to make a single large HTML file"
-	@echo "  pickle     to make pickle files"
-	@echo "  json       to make JSON files"
-	@echo "  htmlhelp   to make HTML files and a HTML help project"
-	@echo "  qthelp     to make HTML files and a qthelp project"
-	@echo "  devhelp    to make HTML files and a Devhelp project"
-	@echo "  epub       to make an epub"
-	@echo "  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
-	@echo "  latexpdf   to make LaTeX files and run them through pdflatex"
-	@echo "  text       to make text files"
-	@echo "  man        to make manual pages"
-	@echo "  texinfo    to make Texinfo files"
-	@echo "  info       to make Texinfo files and run them through makeinfo"
-	@echo "  gettext    to make PO message catalogs"
-	@echo "  changes    to make an overview of all changed/added/deprecated items"
-	@echo "  linkcheck  to check all external links for integrity"
-	@echo "  doctest    to run all doctests embedded in the documentation (if enabled)"
-	@echo "  wadl       to build a WADL file for api.openstack.org"
-
-clean:
-	-rm -rf $(BUILDDIR)/*
-
-html: check-dependencies
-	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
-	@echo
-	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
-
-.PHONY: check-dependencies
-check-dependencies:
-	@python -c 'import sphinxcontrib.autohttp.flask' >/dev/null 2>&1 || (echo "ERROR: Missing Sphinx dependencies. 
Run: pip install sphinxcontrib-httpdomain" && exit 1) - -wadl: - $(SPHINXBUILD) -b docbook $(ALLSPHINXOPTS) $(BUILDDIR)/wadl - @echo - @echo "Build finished. The WADL pages are in $(BUILDDIR)/wadl." - -dirhtml: - $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml - @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." - -singlehtml: - $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml - @echo - @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." - -pickle: - $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle - @echo - @echo "Build finished; now you can process the pickle files." - -json: - $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json - @echo - @echo "Build finished; now you can process the JSON files." - -htmlhelp: - $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp - @echo - @echo "Build finished; now you can run HTML Help Workshop with the" \ - ".hhp project file in $(BUILDDIR)/htmlhelp." - -qthelp: - $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp - @echo - @echo "Build finished; now you can run "qcollectiongenerator" with the" \ - ".qhcp project file in $(BUILDDIR)/qthelp, like this:" - @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Ceilometer.qhcp" - @echo "To view the help file:" - @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Ceilometer.qhc" - -devhelp: - $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp - @echo - @echo "Build finished." - @echo "To view the help file:" - @echo "# mkdir -p $$HOME/.local/share/devhelp/Ceilometer" - @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Ceilometer" - @echo "# devhelp" - -epub: - $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub - @echo - @echo "Build finished. The epub file is in $(BUILDDIR)/epub." - -latex: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo - @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." - @echo "Run \`make' in that directory to run these through (pdf)latex" \ - "(use \`make latexpdf' here to do that automatically)." - -latexpdf: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo "Running LaTeX files through pdflatex..." - $(MAKE) -C $(BUILDDIR)/latex all-pdf - @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." - -text: - $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text - @echo - @echo "Build finished. The text files are in $(BUILDDIR)/text." - -man: - $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man - @echo - @echo "Build finished. The manual pages are in $(BUILDDIR)/man." - -texinfo: - $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo - @echo - @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." - @echo "Run \`make' in that directory to run these through makeinfo" \ - "(use \`make info' here to do that automatically)." - -info: - $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo - @echo "Running Texinfo files through makeinfo..." - make -C $(BUILDDIR)/texinfo info - @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." - -gettext: - $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale - @echo - @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." - -changes: - $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes - @echo - @echo "The overview file is in $(BUILDDIR)/changes." 
-
-linkcheck:
-	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
-	@echo
-	@echo "Link check complete; look for any errors in the above output " \
-	      "or in $(BUILDDIR)/linkcheck/output.txt."
-
-doctest:
-	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
-	@echo "Testing of doctests in the sources finished, look at the " \
-	      "results in $(BUILDDIR)/doctest/output.txt."
diff --git a/doc/source/admin/index.rst b/doc/source/admin/index.rst
deleted file mode 100644
index 0f2d5b0c..00000000
--- a/doc/source/admin/index.rst
+++ /dev/null
@@ -1,7 +0,0 @@
-==========================
-Telemetry Alarming service
-==========================
-
-.. toctree::
-
-   telemetry-alarms.rst
diff --git a/doc/source/admin/telemetry-alarms.rst b/doc/source/admin/telemetry-alarms.rst
deleted file mode 100644
index 1667c6da..00000000
--- a/doc/source/admin/telemetry-alarms.rst
+++ /dev/null
@@ -1,505 +0,0 @@
-.. _telemetry-alarms:
-
-======
-Alarms
-======
-
-Alarms provide user-oriented Monitoring-as-a-Service for resources
-running on OpenStack. This type of monitoring ensures you can
-automatically scale in or out a group of instances through the
-Orchestration service, but you can also use alarms for general-purpose
-awareness of your cloud resources' health.
-
-These alarms follow a tri-state model:
-
-ok
-   The rule governing the alarm has been evaluated as ``False``.
-
-alarm
-   The rule governing the alarm has been evaluated as ``True``.
-
-insufficient data
-   There are not enough datapoints available in the evaluation periods
-   to meaningfully determine the alarm state.
-
-Alarm definitions
-~~~~~~~~~~~~~~~~~
-
-The definition of an alarm provides the rules that govern when a state
-transition should occur, and the actions to be taken thereon. The
-nature of these rules depends on the alarm type.
-
-Threshold rule alarms
----------------------
-
-For conventional threshold-oriented alarms, state transitions are
-governed by:
-
-* A static threshold value with a comparison operator such as greater
-  than or less than.
-
-* A statistic selection to aggregate the data.
-
-* A sliding time window to indicate how far back into the recent past
-  you want to look.
-
-Valid threshold alarms are: ``gnocchi_resources_threshold_rule``,
-``gnocchi_aggregation_by_metrics_threshold_rule``, or
-``gnocchi_aggregation_by_resources_threshold_rule``.
-
-.. note::
-
-   As of Ocata, the ``threshold`` alarm is deprecated since Ceilometer's
-   native storage API is deprecated.
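-For illustration, the following sketch shows how such a rule maps a
-window of aggregated datapoints onto the tri-state model described
-above. It is a deliberately simplified, hypothetical stand-in for
-Aodh's evaluator (function and variable names are illustrative, and a
-real evaluator distinguishes more cases):
-
-.. code-block:: python
-
-   import operator
-
-   OPS = {'gt': operator.gt, 'lt': operator.lt,
-          'ge': operator.ge, 'le': operator.le, 'eq': operator.eq}
-
-   def evaluate_threshold(points, threshold, comparison='gt',
-                          evaluation_periods=3):
-       """Map aggregated datapoints to 'ok'/'alarm'/'insufficient data'."""
-       if len(points) < evaluation_periods:
-           return 'insufficient data'
-       recent = points[-evaluation_periods:]
-       # Simplified: every point in the window must breach the threshold.
-       if all(OPS[comparison](p, threshold) for p in recent):
-           return 'alarm'
-       return 'ok'
-
-   print(evaluate_threshold([71.2, 72.3, 81.4], threshold=70.0))  # alarm
-   print(evaluate_threshold([81.4], threshold=70.0))  # insufficient data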
-Composite rule alarms
----------------------
-
-Composite alarms enable users to define an alarm with multiple triggering
-conditions, using a combination of ``and`` and ``or`` relations.
-
-Combination rule alarms
------------------------
-
-.. note::
-
-   Combination alarms are deprecated as of Newton in favor of composite
-   alarms. Combination alarm functionality is removed in Pike.
-
-The Telemetry service also supports the concept of a meta-alarm, which
-aggregates over the current state of a set of underlying basic alarms
-combined via a logical operator (``and`` or ``or``).
-
-Alarm dimensioning
-~~~~~~~~~~~~~~~~~~
-
-A key associated concept is the notion of *dimensioning*, which
-defines the set of matching meters that feed into an alarm
-evaluation. Recall that meters are per-resource-instance, so in the
-simplest case an alarm might be defined over a particular meter
-applied to all resources visible to a particular user. More useful,
-however, is the option to explicitly select which specific
-resources you are interested in alarming on.
-
-At one extreme you might have narrowly dimensioned alarms where this
-selection would have only a single target (identified by resource
-ID). At the other extreme, you could have widely dimensioned alarms
-where this selection identifies many resources over which the
-statistic is aggregated, for example all instances booted from a
-particular image or all instances with matching user metadata (the
-latter is how the Orchestration service identifies autoscaling
-groups).
-
-Alarm evaluation
-~~~~~~~~~~~~~~~~
-
-Alarms are evaluated by the ``alarm-evaluator`` service on a periodic
-basis, defaulting to once every minute.
-
-Alarm actions
--------------
-
-Any state transition of an individual alarm (to ``ok``, ``alarm``, or
-``insufficient data``) may have one or more actions associated with
-it. These actions effectively send a signal to a consumer that the
-state transition has occurred, and provide some additional context.
-This includes the new and previous states, with some reason data
-describing the disposition with respect to the threshold, the number
-of datapoints involved, and the most recent of these. State transitions
-are detected by the ``alarm-evaluator``, whereas the
-``alarm-notifier`` effects the actual notification action.
-
-**Webhooks**
-
-These are the *de facto* notification type used by Telemetry alarming
-and simply involve an HTTP POST request being sent to an endpoint,
-with a request body containing a description of the state transition
-encoded as a JSON fragment.
-
-**Log actions**
-
-These are a lightweight alternative to webhooks, whereby the state
-transition is simply logged by the ``alarm-notifier``, and are
-intended primarily for testing purposes.
-
-Workload partitioning
----------------------
-
-The alarm evaluation process uses the same mechanism for workload
-partitioning as the central and compute agents. The Tooz library
-provides the coordination within the groups of service instances. For
-further information about this approach, see the high availability
-guide.
-
-To use this workload partitioning solution, set the
-``evaluation_service`` option to ``default``. For more
-information, see the alarm section in the OpenStack Configuration
-Reference.
-
-Using alarms
-~~~~~~~~~~~~
-
-Alarm creation
---------------
-
-Threshold based alarm
-`````````````````````
-
-An example of creating a Gnocchi threshold-oriented alarm, based on an upper
-bound on the CPU utilization for a particular instance:
-
-.. code-block:: console
-
-   $ aodh alarm create \
-     --name cpu_hi \
-     --type gnocchi_resources_threshold \
-     --description 'instance running hot' \
-     --metric cpu_util \
-     --threshold 70.0 \
-     --comparison-operator gt \
-     --aggregation-method mean \
-     --granularity 600 \
-     --evaluation-periods 3 \
-     --alarm-action 'log://' \
-     --resource-id INSTANCE_ID \
-     --resource-type instance
-
-This creates an alarm that will fire when the average CPU utilization
-for an individual instance exceeds 70% for three consecutive 10
-minute periods. The notification in this case is simply a log message,
-though it could alternatively be a webhook URL.
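-When such an alarm fires with a webhook action instead, the body of
-the HTTP POST request is a JSON fragment along the following lines (an
-illustrative sketch built from the fields exercised by Aodh's notifier
-tests; the exact field set and values may vary by release):
-
-.. code-block:: python
-
-   # Illustrative webhook payload; all values here are made up.
-   payload = {
-       'alarm_id': 'ALARM_ID',
-       'alarm_name': 'cpu_hi',
-       'severity': 'low',
-       'previous': 'ok',
-       'current': 'alarm',
-       'reason': 'Transition to alarm due to 3 samples outside threshold',
-       'reason_data': {'most_recent': 81.4},  # hypothetical detail
-   }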
-.. note::
-
-   Alarm names must be unique for the alarms associated with an
-   individual project. The administrator can limit the maximum
-   resulting actions for the three different states, and the
-   ability for a normal user to create ``log://`` and ``test://``
-   notifiers is disabled. This prevents unintentional
-   consumption of disk and memory resources by the
-   Telemetry service.
-
-The sliding time window over which the alarm is evaluated is 30
-minutes in this example. This window is not clamped to wall-clock
-time boundaries; rather, it is anchored on the current time for each
-evaluation cycle, and continually creeps forward as each evaluation
-cycle rolls around (by default, this occurs every minute).
-
-.. note::
-
-   The alarm granularity must match the granularities of the metric configured
-   in Gnocchi.
-
-Otherwise the alarm will tend to flit in and out of the
-``insufficient data`` state due to the mismatch between the actual
-frequency of datapoints in the metering store and the statistics
-queries used to compare against the alarm threshold. If a shorter
-alarm period is needed, then the corresponding interval should be
-adjusted in the ``pipeline.yaml`` file.
-
-Other notable alarm attributes that may be set on creation, or via a
-subsequent update, include:
-
-state
-   The initial alarm state (defaults to ``insufficient data``).
-
-description
-   A free-text description of the alarm (defaults to a synopsis of the
-   alarm rule).
-
-enabled
-   True if evaluation and actioning is to be enabled for this alarm
-   (defaults to ``True``).
-
-repeat-actions
-   True if actions should be repeatedly notified while the alarm
-   remains in the target state (defaults to ``False``).
-
-ok-action
-   An action to invoke when the alarm state transitions to ``ok``.
-
-insufficient-data-action
-   An action to invoke when the alarm state transitions to
-   ``insufficient data``.
-
-time-constraint
-   Used to restrict evaluation of the alarm to certain times of the
-   day or days of the week (expressed as a ``cron`` expression with an
-   optional timezone).
-
-Composite alarm
-```````````````
-
-An example of creating a composite alarm, based on the combined
-state of two underlying alarms:
-
-.. code-block:: console
-
-   $ aodh alarm create \
-     --name meta \
-     --type composite \
-     --composite-rule '{"or": [{"threshold": 0.8, "metric": "cpu_util", \
-       "type": "gnocchi_resources_threshold", "resource_id": INSTANCE_ID1, \
-       "resource_type": "instance", "aggregation_method": "last"}, \
-       {"threshold": 0.8, "metric": "cpu_util", \
-       "type": "gnocchi_resources_threshold", "resource_id": INSTANCE_ID2, \
-       "resource_type": "instance", "aggregation_method": "last"}]}' \
-     --alarm-action 'http://example.org/notify'
-
-This creates an alarm that will fire when either one of the two underlying
-alarms transitions into the alarm state. The notification in this case
-is a webhook call. Any number of underlying alarms can be combined in
-this way, using either ``and`` or ``or``. Additionally, combinations
-can contain nested conditions:
-
-.. note::
-
-   Observe the *underscore in* ``resource_id`` & ``resource_type`` in
-   the composite rule as opposed to the ``--resource-id`` &
-   ``--resource-type`` CLI arguments.
-
-.. code-block:: console
-
-   $ aodh alarm create \
-     --name meta \
-     --type composite \
-     --composite-rule '{"or": [ALARM_1, {"and": [ALARM_2, ALARM_3]}]}' \
-     --alarm-action 'http://example.org/notify'
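-The logic behind such nesting can be pictured with a small recursive
-sketch (hypothetical names, not Aodh's internals; the boolean leaves
-stand for the already-evaluated states of the underlying alarms, with
-``True`` meaning ``alarm``):
-
-.. code-block:: python
-
-   def evaluate_composite(rule):
-       """Evaluate a nested {'and': [...]} / {'or': [...]} rule."""
-       if isinstance(rule, bool):  # leaf: state of an underlying alarm
-           return rule
-       (op, subrules), = rule.items()
-       results = [evaluate_composite(r) for r in subrules]
-       return all(results) if op == 'and' else any(results)
-
-   # Mirrors {"or": [ALARM_1, {"and": [ALARM_2, ALARM_3]}]}
-   print(evaluate_composite({'or': [False, {'and': [True, True]}]}))  # True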
-Event based alarm
-`````````````````
-
-An example of creating an event alarm based on the power state of an
-instance:
-
-.. code-block:: console
-
-   $ aodh alarm create \
-     --type event \
-     --name instance_off \
-     --description 'Instance powered OFF' \
-     --event-type "compute.instance.power_off.*" \
-     --enable True \
-     --query "traits.instance_id=string::INSTANCE_ID" \
-     --alarm-action 'log://' \
-     --ok-action 'log://' \
-     --insufficient-data-action 'log://'
-
-A valid list of ``event-type`` and ``traits`` can be found in the
-``event_definitions.yaml`` file. ``--query`` may also contain a mix of
-traits, for example to create an alarm when an instance is powered on
-but goes into an error state:
-
-.. code-block:: console
-
-   $ aodh alarm create \
-     --type event \
-     --name instance_on_state_err \
-     --description 'Instance powered ON but in error state' \
-     --event-type "compute.instance.power_on.*" \
-     --enable True \
-     --query "traits.instance_id=string::INSTANCE_ID;traits.state=string::error" \
-     --alarm-action 'log://' \
-     --ok-action 'log://' \
-     --insufficient-data-action 'log://'
-
-Sample output of alarm type **event**:
-
-.. code-block:: console
-
-   +---------------------------+---------------------------------------------------------------+
-   | Field                     | Value                                                         |
-   +---------------------------+---------------------------------------------------------------+
-   | alarm_actions             | [u'log://']                                                   |
-   | alarm_id                  | 15c0da26-524d-40ad-8fba-3e55ee0ddc91                          |
-   | description               | Instance powered ON but in error state                        |
-   | enabled                   | True                                                          |
-   | event_type                | compute.instance.power_on.*                                   |
-   | insufficient_data_actions | [u'log://']                                                   |
-   | name                      | instance_on_state_err                                         |
-   | ok_actions                | [u'log://']                                                   |
-   | project_id                | 9ee200732f4c4d10a6530bac746f1b6e                              |
-   | query                     | traits.instance_id = bb912729-fa51-443b-bac6-bf4c795f081d AND |
-   |                           | traits.state = error                                          |
-   | repeat_actions            | False                                                         |
-   | severity                  | low                                                           |
-   | state                     | insufficient data                                             |
-   | state_timestamp           | 2017-07-15T02:28:31.114455                                    |
-   | time_constraints          | []                                                            |
-   | timestamp                 | 2017-07-15T02:28:31.114455                                    |
-   | type                      | event                                                         |
-   | user_id                   | 89b4e48bcbdb4816add7800502bd5122                              |
-   +---------------------------+---------------------------------------------------------------+
-
-.. note::
-
-   To enable event alarms, please refer to the Configuration section
-   of the event alarm contributor documentation.
-
-Alarm retrieval
----------------
-
-You can display all your alarms via (some attributes are omitted for
-brevity):
-
-.. code-block:: console
-
-   $ aodh alarm list
-   +----------+-----------+--------+-------------------+----------+---------+
-   | alarm_id | type      | name   | state             | severity | enabled |
-   +----------+-----------+--------+-------------------+----------+---------+
-   | ALARM_ID | threshold | cpu_hi | insufficient data | high     | True    |
-   +----------+-----------+--------+-------------------+----------+---------+
-
-In this case, the state is reported as ``insufficient data`` which
-could indicate that:
-
-* meters have not yet been gathered about this instance over the
-  evaluation window into the recent past (for example a brand-new
-  instance)
-
-* *or*, that the identified instance is not visible to the
-  user/project owning the alarm
-
-* *or*, simply that an alarm evaluation cycle hasn't kicked off since
-  the alarm was created (by default, alarms are evaluated once per
-  minute).
-.. note::
-
-   The visibility of alarms depends on the role and project
-   associated with the user issuing the query:
-
-   * admin users see *all* alarms, regardless of the owner
-
-   * non-admin users see only the alarms associated with their project
-     (as per the normal project segregation in OpenStack)
-
-Alarm update
-------------
-
-Once the state of the alarm has settled down, we might decide that we
-set that bar too low at 70%, in which case the threshold (or almost
-any other alarm attribute) can be updated as follows:
-
-.. code-block:: console
-
-   $ aodh alarm update ALARM_ID --threshold 75
-
-The change will take effect from the next evaluation cycle, which by
-default occurs every minute.
-
-Most alarm attributes can be changed in this way, but there is also
-a convenient short-cut for getting and setting the alarm state:
-
-.. code-block:: console
-
-   $ openstack alarm state get ALARM_ID
-   $ openstack alarm state set --state ok ALARM_ID
-
-Over time the state of the alarm may change often, especially if the
-threshold is chosen to be close to the trending value of the
-statistic. You can follow the history of an alarm over its lifecycle
-via the audit API:
-
-.. code-block:: console
-
-   $ aodh alarm-history show ALARM_ID
-   +-----------+------------------+---------------------------------------------------+----------+
-   | timestamp | type             | detail                                            | event_id |
-   +-----------+------------------+---------------------------------------------------+----------+
-   | TIME_3    | rule change      | {"rule": {"evaluation_periods": 3, "metric":      | EVENT_ID |
-   |           |                  | "cpu_util", "resource_id": RESOURCE_ID,           |          |
-   |           |                  | "aggregation_method": "mean", "granularity": 600, |          |
-   |           |                  | "threshold": 75.0, "comparison_operator": "gt",   |          |
-   |           |                  | "resource_type": "instance"}}                     |          |
-   | TIME_2    | state transition | {"transition_reason": "Transition to alarm due    | EVENT_ID |
-   |           |                  | to 3 samples outside threshold, most recent:      |          |
-   |           |                  | 81.4108514719", "state": "alarm"}                 |          |
-   | TIME_1    | state transition | {"transition_reason": "Transition to ok due to 1  | EVENT_ID |
-   |           |                  | samples inside threshold, most recent:            |          |
-   |           |                  | 67.952938019089", "state": "ok"}                  |          |
-   | TIME_0    | creation         | {"alarm_actions": ["log://"], "user_id": USER_ID, | EVENT_ID |
-   |           |                  | "name": "cpu_hi", "state": "insufficient data",   |          |
-   |           |                  | "timestamp": TIME_0, "description": "instance     |          |
-   |           |                  | running hot", "enabled": true, "state_timestamp": |          |
-   |           |                  | TIME_0, "rule": {"evaluation_periods": 3,         |          |
-   |           |                  | "metric": "cpu_util", "resource_id": RESOURCE_ID, |          |
-   |           |                  | "aggregation_method": "mean", "granularity": 600, |          |
-   |           |                  | "resource_type": "instance"}, "alarm_id":         |          |
-   |           |                  | ALARM_ID, "time_constraints": [],                 |          |
-   |           |                  | "insufficient_data_actions": [],                  |          |
-   |           |                  | "repeat_actions": false, "ok_actions": [],        |          |
-   |           |                  | "project_id": PROJECT_ID, "type":                 |          |
-   |           |                  | "gnocchi_resources_threshold", "severity": "low"} |          |
-   +-----------+------------------+---------------------------------------------------+----------+
-
-Alarm deletion
---------------
-
-An alarm that is no longer required can be disabled so that it is no
-longer actively evaluated:
-
-.. code-block:: console
-
-   $ aodh alarm update --enabled False ALARM_ID
-
-or even deleted permanently (an irreversible step):
-
-.. code-block:: console
-
-   $ aodh alarm delete ALARM_ID
-
-Debug alarms
-------------
-
-A good place to start is to add the ``--debug`` flag when creating or
-updating an alarm. For example:
-.. code-block:: console
-
-   $ aodh --debug alarm create
-
-Look for the state transition in the ``/var/log/aodh/listener.log``
-file when the event is triggered. For example, the log below shows the
-state transition of the alarm with id
-``85a2942f-a2ec-4310-baea-d58f9db98654`` triggered by event id
-``abe437a3-b75b-40b4-a3cb-26022a919f5e``:
-
-.. code-block:: console
-
-   2017-07-15 07:03:20.149 2866 INFO aodh.evaluator [-] alarm 85a2942f-a2ec-4310-baea-d58f9db98654 transitioning to alarm because Event hits the query.
-
-The entry below in ``/var/log/aodh/notifier.log`` also confirms that
-event id ``abe437a3-b75b-40b4-a3cb-26022a919f5e`` hits the query
-matching instance id ``bb912729-fa51-443b-bac6-bf4c795f081d``:
-
-.. code-block:: console
-
-   2017-07-15 07:03:24.071 2863 INFO aodh.notifier.log [-] Notifying alarm instance_off 85a2942f-a2ec-4310-baea-d58f9db98654 of low priority from insufficient data to alarm with action log: because Event hits the query
-
-As mentioned earlier, ``aodh alarm-history`` will also display the
-transition:
-
-.. code-block:: console
-
-   $ aodh alarm-history show 85a2942f-a2ec-4310-baea-d58f9db98654
-   +----------------------------+------------------+--------------------------------------------------------------------------------------------------------------------------+--------------------------------------+
-   | timestamp                  | type             | detail                                                                                                                   | event_id                             |
-   +----------------------------+------------------+--------------------------------------------------------------------------------------------------------------------------+--------------------------------------+
-   | 2017-07-15T01:33:20.390623 | state transition | {"transition_reason": "Event hits                                                                                        | c5ca92ae-584b-4da6-a12c-b7a00dd39fef |
-   |                            |                  | the query.", "state": "alarm"}                                                                                           |                                      |
-   | 2017-07-15T01:31:14.516188 | creation         | {"alarm_actions": ["log://"], "user_id": "89b4e48bcbdb4816add7800502bd5122", "name": "instance_off", "state":            | fb31f4c2-e357-44c3-9b6a-bd2aaaa4ae68 |
-   |                            |                  | "insufficient data", "timestamp": "2017-07-15T01:31:14.516188", "description": "event_instance_power_off", "enabled":    |                                      |
-   |                            |                  | true, "state_timestamp": "2017-07-15T01:31:14.516188", "rule": {"query": [{"field": "traits.instance_id", "type":        |                                      |
-   |                            |                  | "string", "value": "bb912729-fa51-443b-bac6-bf4c795f081d", "op": "eq"}], "event_type": "compute.instance.power_off.*"},  |                                      |
-   |                            |                  | "alarm_id": "85a2942f-a2ec-4310-baea-d58f9db98654", "time_constraints": [], "insufficient_data_actions": ["log://"],     |                                      |
-   |                            |                  | "repeat_actions": false, "ok_actions": ["log://"], "project_id": "9ee200732f4c4d10a6530bac746f1b6e", "type": "event",    |                                      |
-   |                            |                  | "severity": "low"}                                                                                                       |                                      |
-   +----------------------------+------------------+--------------------------------------------------------------------------------------------------------------------------+--------------------------------------+
diff --git a/doc/source/conf.py b/doc/source/conf.py
deleted file mode 100644
index cb324a75..00000000
--- a/doc/source/conf.py
+++ /dev/null
@@ -1,331 +0,0 @@
-#
-# Aodh documentation build configuration file, created by
-# sphinx-quickstart on Thu Oct 27 11:38:59 2011.
-#
-# This file is execfile()d with the current directory set to its
-# containing dir.
-#
-# Note that not all possible configuration values are present in this
-# autogenerated file.
-#
-# All configuration values have a default; values that are commented out
-# serve to show the default.
- -import sys -import os -import subprocess -import warnings - -BASE_DIR = os.path.dirname(os.path.abspath(__file__)) -ROOT = os.path.abspath(os.path.join(BASE_DIR, "..", "..")) - -sys.path.insert(0, ROOT) -sys.path.insert(0, BASE_DIR) - -# This is required for ReadTheDocs.org, but isn't a bad idea anyway. -os.environ['DJANGO_SETTINGS_MODULE'] = 'openstack_dashboard.settings' - - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -#sys.path.insert(0, os.path.abspath('.')) - -# -- General configuration ---------------------------------------------------- - -# If your documentation needs a minimal Sphinx version, state it here. -#needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. -# They can be extensions coming with Sphinx (named 'sphinx.ext.*') -# or your custom ones. -extensions = [ - 'openstackdocstheme', - 'sphinx.ext.autodoc', - 'wsmeext.sphinxext', - 'sphinx.ext.coverage', - 'sphinx.ext.viewcode', - 'sphinxcontrib.pecanwsme.rest', - 'stevedore.sphinxext', - 'oslo_config.sphinxconfiggen', - 'sphinxcontrib.httpdomain', -] - -config_generator_config_file = os.path.join(ROOT, - 'aodh/cmd/aodh-config-generator.conf') -sample_config_basename = '_static/aodh' - -wsme_protocols = ['restjson', 'restxml'] - -todo_include_todos = True - -# Add any paths that contain templates here, relative to this directory. -if os.getenv('HUDSON_PUBLISH_DOCS'): - templates_path = ['_ga', '_templates'] -else: - templates_path = ['_templates'] - -# The suffix of source filenames. -source_suffix = '.rst' - -# The encoding of source files. -#source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -project = u'Aodh' -copyright = u'2012-2015, OpenStack Foundation' - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -#language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -#today = '' -# Else, today_fmt is used as the format for a strftime call. -#today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = ['**/#*', '**~', '**/#*#', '**/*alembic*'] - -# The reST default role (used for this markup: `text`) -# to use for all documents. -#default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -#add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -#add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# A list of ignored prefixes for module index sorting. -#modindex_common_prefix = [] - -primary_domain = 'py' -nitpicky = False - - -# -- Options for HTML output -------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. 
-# html_theme_path = ['.']
-html_theme = 'openstackdocs'
-
-# Theme options are theme-specific and customize the look and feel of a theme
-# further. For a list of options available for each theme, see the
-# documentation.
-html_theme_options = {
-    "nosidebar": "false"
-}
-
-# Add any paths that contain custom themes here, relative to this directory.
-# html_theme_path = [openstackdocstheme.get_html_theme_path()]
-
-# The name for this set of Sphinx documents. If None, it defaults to
-# "<project> v<release> documentation".
-#html_title = None
-
-# A shorter title for the navigation bar. Default is the same as html_title.
-#html_short_title = None
-
-# The name of an image file (relative to this directory) to place at the top
-# of the sidebar.
-#html_logo = None
-
-# The name of an image file (within the static path) to use as favicon of the
-# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
-# pixels large.
-#html_favicon = None
-
-# Add any paths that contain custom static files (such as style sheets) here,
-# relative to this directory. They are copied after the builtin static files,
-# so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
-
-# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
-# using the given strftime format.
-html_last_updated_fmt = '%Y-%m-%d %H:%M'
-
-# openstackdocstheme options
-repository_name = 'openstack/aodh'
-bug_project = 'aodh'
-bug_tag = ''
-
-# If true, SmartyPants will be used to convert quotes and dashes to
-# typographically correct entities.
-#html_use_smartypants = True
-
-# Custom sidebar templates, maps document names to template names.
-#html_sidebars = {}
-
-# Additional templates that should be rendered to pages, maps page names to
-# template names.
-#html_additional_pages = {}
-
-# If false, no module index is generated.
-#html_domain_indices = True
-
-# If false, no index is generated.
-#html_use_index = True
-
-# If true, the index is split into individual pages for each letter.
-#html_split_index = False
-
-# If true, links to the reST sources are added to the pages.
-#html_show_sourcelink = True
-
-# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
-#html_show_sphinx = True
-
-# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
-#html_show_copyright = True
-
-# If true, an OpenSearch description file will be output, and all pages will
-# contain a <link> tag referring to it. The value of this option must be the
-# base URL from which the finished HTML is served.
-#html_use_opensearch = ''
-
-# This is the file name suffix for HTML files (e.g. ".xhtml").
-#html_file_suffix = None
-
-# Output file base name for HTML help builder.
-htmlhelp_basename = 'Aodhdoc'
-
-
-# -- Options for LaTeX output -------------------------------------------------
-
-latex_elements = {
-    # The paper size ('letterpaper' or 'a4paper').
-    #'papersize': 'letterpaper',
-
-    # The font size ('10pt', '11pt' or '12pt').
-    #'pointsize': '10pt',
-
-    # Additional stuff for the LaTeX preamble.
-    #'preamble': '',
-}
-
-# Grouping the document tree into LaTeX files. List of tuples
-# (source start file, target name, title, author, documentclass
-# [howto/manual]).
-latex_documents = [
-    ('index', 'Aodh.tex', u'Aodh Documentation',
-     u'OpenStack Foundation', 'manual'),
-]
-
-# The name of an image file (relative to this directory) to place at the top of
-# the title page.
-#latex_logo = None
-
-# For "manual" documents, if this is true, then toplevel headings are parts,
-# not chapters.
-#latex_use_parts = False
-
-# If true, show page references after internal links.
-#latex_show_pagerefs = False
-
-# If true, show URL addresses after external links.
-#latex_show_urls = False
-
-# Documents to append as an appendix to all manuals.
-#latex_appendices = []
-
-# If false, no module index is generated.
-#latex_domain_indices = True
-
-
-# -- Options for manual page output -------------------------------------------
-
-# One entry per manual page. List of tuples
-# (source start file, name, description, authors, manual section).
-man_pages = [
-    ('index', 'aodh', u'Aodh Documentation',
-     [u'OpenStack'], 1)
-]
-
-# If true, show URL addresses after external links.
-#man_show_urls = False
-
-
-# -- Options for Texinfo output -----------------------------------------------
-
-# Grouping the document tree into Texinfo files. List of tuples
-# (source start file, target name, title, author,
-# dir menu entry, description, category)
-texinfo_documents = [
-    ('index', 'Aodh', u'Aodh Documentation', u'OpenStack',
-     'Aodh', 'One line description of project.', 'Miscellaneous'),
-]
-
-# Documents to append as an appendix to all manuals.
-#texinfo_appendices = []
-
-# If false, no module index is generated.
-#texinfo_domain_indices = True
-
-# How to display URL addresses: 'footnote', 'no', or 'inline'.
-#texinfo_show_urls = 'footnote'
-
-
-# -- Options for Epub output --------------------------------------------------
-
-# Bibliographic Dublin Core info.
-epub_title = u'Aodh'
-epub_author = u'OpenStack'
-epub_publisher = u'OpenStack'
-epub_copyright = u'2012-2015, OpenStack'
-
-# The language of the text. It defaults to the language option
-# or en if the language is not set.
-#epub_language = ''
-
-# The scheme of the identifier. Typical schemes are ISBN or URL.
-#epub_scheme = ''
-
-# The unique identifier of the text. This can be an ISBN number
-# or the project homepage.
-#epub_identifier = ''
-
-# A unique identification for the text.
-#epub_uid = ''
-
-# A tuple containing the cover image and cover page html template filenames.
-#epub_cover = ()
-
-# HTML files that should be inserted before the pages created by sphinx.
-# The format is a list of tuples containing the path and title.
-#epub_pre_files = []
-
-# HTML files shat should be inserted after the pages created by sphinx.
-# The format is a list of tuples containing the path and title.
-#epub_post_files = []
-
-# A list of files that should not be packed into the epub file.
-#epub_exclude_files = []
-
-# The depth of the table of contents in toc.ncx.
-#epub_tocdepth = 3
-
-# Allow duplicate toc entries.
-#epub_tocdup = True
-
-# NOTE(dhellmann): pbr used to set this option but now that we are
-# using Sphinx>=1.6.2 it does not so we have to set it ourselves.
-suppress_warnings = [
-    'app.add_directive', 'app.add_role',
-    'app.add_generic_role', 'app.add_node',
-    'image.nonlocal_uri',
-]
diff --git a/doc/source/contributor/architecture.rst b/doc/source/contributor/architecture.rst
deleted file mode 100644
index 126ff93b..00000000
--- a/doc/source/contributor/architecture.rst
+++ /dev/null
@@ -1,64 +0,0 @@
-.. _architecture:
-
-=====================
- System Architecture
-=====================
-
-High-Level Architecture
-=======================
-
-Each of Aodh's services is designed to scale horizontally. Additional
-workers and nodes can be added depending on the expected load.
-Aodh provides daemons to evaluate and notify based on defined alarming
-rules.
-
-Evaluating the data
-===================
-
-Alarming Service
-----------------
-
-The alarming component of Aodh, first delivered in the Ceilometer service
-during the Havana development cycle and then split out into this independent
-project in the Liberty development cycle, allows you to set alarms based on
-threshold evaluation for a collection of samples or a dedicated event. An
-alarm can be set on a single meter, or on a combination. For example, you may
-want to trigger an alarm when the memory consumption reaches 70% on a given
-instance if the instance has been up for more than 10 min. To set up an
-alarm, you will call Aodh's API server, specifying the alarm conditions and
-an action to take.
-
-Of course, if you are not an administrator of the cloud itself, you can only
-set alarms on meters for your own components.
-
-There can be multiple forms of actions, but only a few have been
-implemented so far:
-
-1. :term:`HTTP callback`: you provide a URL to be called whenever the alarm has
-   been set off. The payload of the request contains all the details of why the
-   alarm was triggered (a minimal receiver sketch appears at the end of this
-   page).
-2. :term:`log`: mostly useful for debugging, stores alarms in a log file.
-3. :term:`zaqar`: Send notification to messaging service via Zaqar API.
-
-Alarm Rules
-===========
-
-.. list-plugins:: aodh.alarm.rule
-   :detailed:
-
-Alarm Evaluators
-================
-
-.. list-plugins:: aodh.evaluator
-   :detailed:
-
-Alarm Notifiers
-===============
-
-.. list-plugins:: aodh.notifier
-   :detailed:
-
-Alarm Storage
-=============
-
-.. list-plugins:: aodh.storage
-   :detailed:
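-To experiment with the HTTP callback action, a webhook receiver can be
-as small as the standard-library sketch below (illustrative only, not a
-production endpoint; the printed fields mirror those used by the
-notifier tests):
-
-.. code-block:: python
-
-   import json
-   from http.server import BaseHTTPRequestHandler, HTTPServer
-
-   class AlarmHandler(BaseHTTPRequestHandler):
-       def do_POST(self):
-           # Aodh POSTs a JSON description of the state transition.
-           length = int(self.headers['Content-Length'])
-           transition = json.loads(self.rfile.read(length))
-           print('%s: %s -> %s (%s)' % (transition.get('alarm_id'),
-                                        transition.get('previous'),
-                                        transition.get('current'),
-                                        transition.get('reason')))
-           self.send_response(200)
-           self.end_headers()
-
-   if __name__ == '__main__':
-       HTTPServer(('0.0.0.0', 8080), AlarmHandler).serve_forever()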
-
-Alarm Rules
-===========
-
-.. list-plugins:: aodh.alarm.rule
-   :detailed:
-
-Alarm Evaluators
-================
-
-.. list-plugins:: aodh.evaluator
-   :detailed:
-
-Alarm Notifiers
-===============
-
-.. list-plugins:: aodh.notifier
-   :detailed:
-
-Alarm Storage
-=============
-
-.. list-plugins:: aodh.storage
-   :detailed:
diff --git a/doc/source/contributor/configuration.rst b/doc/source/contributor/configuration.rst
deleted file mode 100644
index 70de8486..00000000
--- a/doc/source/contributor/configuration.rst
+++ /dev/null
@@ -1,36 +0,0 @@
-..
-      Copyright 2012 New Dream Network, LLC (DreamHost)
-
-      Licensed under the Apache License, Version 2.0 (the "License"); you may
-      not use this file except in compliance with the License. You may obtain
-      a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-      License for the specific language governing permissions and limitations
-      under the License.
-
-
-=============
-Configuration
-=============
-
-Aodh Sample Configuration File
-==============================
-
-Configure Aodh by editing /etc/aodh/aodh.conf.
-
-No config file is provided with the source code; it is created during
-installation. If no configuration file was installed, one can easily be
-created by running::
-
-    aodh-config-generator
-
-The following is a sample Aodh configuration for adaptation and use. It is
-auto-generated from Aodh when this documentation is built, and can also be
-viewed in `file form <_static/aodh.conf.sample>`_.
-
-.. literalinclude:: ../_static/aodh.conf.sample
diff --git a/doc/source/contributor/contributing.rst b/doc/source/contributor/contributing.rst
deleted file mode 100644
index bf435f36..00000000
--- a/doc/source/contributor/contributing.rst
+++ /dev/null
@@ -1,32 +0,0 @@
-..
-      Licensed under the Apache License, Version 2.0 (the "License"); you may
-      not use this file except in compliance with the License. You may obtain
-      a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-      License for the specific language governing permissions and limitations
-      under the License.
-
-.. _contributing:
-
-======================
- Contributing to Aodh
-======================
-
-Aodh follows the same workflow as other OpenStack projects. To start
-contributing to Aodh, please follow the workflow found here_.
-
-.. _here: https://wiki.openstack.org/wiki/Gerrit_Workflow
-
-
-Project Hosting Details
-=======================
-
-:Bug tracker: https://launchpad.net/aodh
-:Mailing list: http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-dev (prefix subjects with ``[Aodh]`` for faster responses)
-:Code Hosting: https://git.openstack.org/cgit/openstack/aodh/
-:Code Review: https://review.openstack.org/#/q/status:open+project:openstack/aodh,n,z
diff --git a/doc/source/contributor/event-alarm.rst b/doc/source/contributor/event-alarm.rst
deleted file mode 100644
index e8a0e30e..00000000
--- a/doc/source/contributor/event-alarm.rst
+++ /dev/null
@@ -1,87 +0,0 @@
-..
-      Copyright 2014 Huawei Technologies Co., Ltd.
-
-      Licensed under the Apache License, Version 2.0 (the "License"); you may
-      not use this file except in compliance with the License. You may obtain
-      a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-      License for the specific language governing permissions and limitations
-      under the License.
-
-===========
-Event alarm
-===========
-
-Aodh allows users to define alarms which can be evaluated based on events
-passed from other OpenStack services. The events can be emitted when the
-resources from other OpenStack services have been updated, created or deleted,
-such as 'compute.instance.reboot.end', 'scheduler.select_destinations.end'.
-When creating an alarm of type "event", an event_type can be specified to
-identify the type of event that will trigger the alarm. The event_type field
-supports fuzzy matching with wildcards. Additionally, users can specify query
-conditions to filter the specific events used to trigger the alarm.
-
-This feature was implemented with proposal event-alarm_.
-
-.. _event-alarm: https://blueprints.launchpad.net/ceilometer/+spec/event-alarm-evaluator
-
-Usage
-=====
-
-When creating an alarm of the "event" type, an "event_rule" must be
-specified, which includes an "event_type" field and a "query" field. The
-"event_type" field lets users specify the event type used to match incoming
-events when evaluating the alarm, and the "query" field includes a list of
-conditions used to filter the specific events considered during evaluation.
-
-The following is an example of an event alarm rule::
-
-    "event_rule": {
-        "event_type": "compute.instance.update",
-        "query": [
-            {
-                "field": "traits.instance_id",
-                "type": "string",
-                "value": "153462d0-a9b8-4b5b-8175-9e4b05e9b856",
-                "op": "eq"
-            },
-            {
-                "field": "traits.state",
-                "type": "string",
-                "value": "error",
-                "op": "eq"
-            }
-        ]
-    }
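-
-A complete alarm-creation request wrapping such a rule can be posted to the
-:ref:`alarms API <alarms-api>`. The following sketch is illustrative only;
-the endpoint, token and action URL are placeholders::
-
-    curl -X POST http://controller:8042/v2/alarms \
-         -H "X-Auth-Token: $OS_TOKEN" \
-         -H "Content-Type: application/json" \
-         -d '{
-               "name": "instance_in_error_state",
-               "type": "event",
-               "alarm_actions": ["log://"],
-               "event_rule": {
-                   "event_type": "compute.instance.update",
-                   "query": [{"field": "traits.state", "type": "string",
-                              "value": "error", "op": "eq"}]
-               }
-             }'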
-
-Configuration
-=============
-
-To enable this functionality, configure Ceilometer to publish events to the
-queue that the aodh-listener service listens on. The *event_alarm_topic*
-configuration option of Aodh identifies which messaging topic the
-aodh-listener listens on; the default value is "alarm.all". On the Ceilometer
-side, a publisher of the notifier type needs to be configured in the event
-pipeline configuration file (``event_pipeline.yaml`` by default), and the
-notifier must use the same messaging topic as the *event_alarm_topic* option.
-For example::
-
-    ---
-    sources:
-        - name: event_source
-          events:
-              - "*"
-          sinks:
-              - event_sink
-    sinks:
-        - name: event_sink
-          transformers:
-          publishers:
-              - notifier://
-              - notifier://?topic=alarm.all
diff --git a/doc/source/contributor/index.rst b/doc/source/contributor/index.rst
deleted file mode 100644
index d7f8685b..00000000
--- a/doc/source/contributor/index.rst
+++ /dev/null
@@ -1,65 +0,0 @@
-..
-      Copyright 2012 Nicolas Barcet for Canonical
-
-      Licensed under the Apache License, Version 2.0 (the "License"); you may
-      not use this file except in compliance with the License. You may obtain
-      a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-      License for the specific language governing permissions and limitations
-      under the License.
-
-============================================
-Welcome to the Aodh developer documentation!
-============================================
-
-What is the purpose of the project and vision for it?
-=====================================================
-
-* Provide alarms and notifications based on metrics.
-
-This documentation offers information on how Aodh works and how to contribute
-to the project.
-
-Overview
-========
-
-.. toctree::
-   :maxdepth: 2
-
-   architecture
-   webapi/index
-
-Developer Documentation
-=======================
-
-.. toctree::
-   :maxdepth: 2
-
-   install/index
-   configuration
-   testing
-   contributing
-   event-alarm
-   api/autoindex
-
-Appendix
-========
-
-.. toctree::
-   :maxdepth: 1
-
-   releasenotes/index
-
-.. update index
-
-Indices and tables
-==================
-
-* :ref:`genindex`
-* :ref:`modindex`
-* :ref:`search`
diff --git a/doc/source/contributor/install/development.rst b/doc/source/contributor/install/development.rst
deleted file mode 100644
index d8c170ac..00000000
--- a/doc/source/contributor/install/development.rst
+++ /dev/null
@@ -1,49 +0,0 @@
-..
-      Copyright 2012 Nicolas Barcet for Canonical
-                2013 New Dream Network, LLC (DreamHost)
-
-      Licensed under the Apache License, Version 2.0 (the "License"); you may
-      not use this file except in compliance with the License. You may obtain
-      a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-      License for the specific language governing permissions and limitations
-      under the License.
-
-===============================
- Installing development sandbox
-===============================
-
-Configuring devstack
-====================
-
-.. index::
-   double: installing; devstack
-
-1. Download devstack_.
-
-2. Create a ``local.conf`` file as input to devstack.
-
-   .. note::
-
-      ``local.conf`` replaces the former configuration file called ``localrc``.
-      If you used localrc before, remove it to switch to using the new file.
-      For further information see the `devstack configuration
-      `_.
-
-3. The aodh services are not enabled by default, so they must be
-   enabled in ``local.conf`` before running ``stack.sh``.
-
-   This example ``local.conf`` file shows all of the settings required for
-   aodh::
-
-      [[local|localrc]]
-
-      # Enable the aodh alarming services
-      enable_plugin aodh https://git.openstack.org/openstack/aodh master
-
-.. _devstack: https://docs.openstack.org/devstack/latest/
diff --git a/doc/source/contributor/install/index.rst b/doc/source/contributor/install/index.rst
deleted file mode 100644
index 4e4009d1..00000000
--- a/doc/source/contributor/install/index.rst
+++ /dev/null
@@ -1,28 +0,0 @@
-..
-      Copyright 2013 New Dream Network, LLC (DreamHost)
-
-      Licensed under the Apache License, Version 2.0 (the "License"); you may
-      not use this file except in compliance with the License. You may obtain
-      a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-      License for the specific language governing permissions and limitations
-      under the License.
-
-.. _install:
-
-=================
- Installing Aodh
-=================
-
-.. toctree::
-   :maxdepth: 2
-
-   development
-   manual
-   mod_wsgi
-   uwsgi
diff --git a/doc/source/contributor/install/manual.rst b/doc/source/contributor/install/manual.rst
deleted file mode 100644
index a2fb179c..00000000
--- a/doc/source/contributor/install/manual.rst
+++ /dev/null
@@ -1,45 +0,0 @@
-..
-      Copyright 2012 Nicolas Barcet for Canonical
-                2013 New Dream Network, LLC (DreamHost)
-
-      Licensed under the Apache License, Version 2.0 (the "License"); you may
-      not use this file except in compliance with the License. You may obtain
-      a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-      License for the specific language governing permissions and limitations
-      under the License.
-
-.. _installing_manually:
-
-===================
-Installing Manually
-===================
-
-Installing the API Server
-=========================
-There are two recommended ways to start the API server:
-
-  1. Starting the API server through mod_wsgi_;
-  2. Starting the API server through uwsgi_.
-
-For testing purposes only (this is not recommended for production), the API
-server can also be started with the aodh-api binary::
-
-    aodh-api --port 8042 -- --config-file /etc/aodh/aodh.conf
-
-Database configuration
-======================
-
-You can use any SQLAlchemy-supported DB such as `PostgreSQL` or `MySQL`.
-To use MySQL as the storage backend, change the 'database' section in
-aodh.conf as follows::
-
-    [database]
-    connection = mysql+pymysql://username:password@host/aodh?charset=utf8
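-
-Once the connection URL is set, the database schema can be created with the
-``aodh-dbsync`` tool that ships with Aodh. A minimal sketch, assuming the
-default configuration path (``--config-file`` is the standard oslo.config
-option)::
-
-    aodh-dbsync --config-file /etc/aodh/aodh.conf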
-
-.. _mod_wsgi: ../install/mod_wsgi.html
-.. _uwsgi: ../install/uwsgi.html
diff --git a/doc/source/contributor/install/mod_wsgi.rst b/doc/source/contributor/install/mod_wsgi.rst
deleted file mode 100644
index 890c6713..00000000
--- a/doc/source/contributor/install/mod_wsgi.rst
+++ /dev/null
@@ -1,46 +0,0 @@
-..
-      Copyright 2013 New Dream Network, LLC (DreamHost)
-
-      Licensed under the Apache License, Version 2.0 (the "License"); you may
-      not use this file except in compliance with the License. You may obtain
-      a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-      License for the specific language governing permissions and limitations
-      under the License.
-
-===================================
- Installing the API behind mod_wsgi
-===================================
-
-Aodh comes with a WSGI application file named `aodh/api/app.wsgi` for
-configuring the API service to run behind Apache with ``mod_wsgi``. This file
-is installed with the rest of the Aodh application code, and should not need
-to be modified.
-
-You can then configure Apache with something like this::
-
-    Listen 8042
-
-    <VirtualHost *:8042>
-        WSGIDaemonProcess aodh-api processes=2 threads=10 user=SOMEUSER display-name=%{GROUP}
-        WSGIProcessGroup aodh-api
-        WSGIScriptAlias / /usr/lib/python2.7/dist-packages/aodh/api/app
-        WSGIApplicationGroup %{GLOBAL}
-        <IfVersion >= 2.4>
-            ErrorLogFormat "%{cu}t %M"
-        </IfVersion>
-        ErrorLog /var/log/httpd/aodh_error.log
-        CustomLog /var/log/httpd/aodh_access.log combined
-    </VirtualHost>
-
-    WSGISocketPrefix /var/run/httpd
-
-Modify the ``WSGIDaemonProcess`` directive to set the ``user`` and ``group``
-values to an appropriate user on your server. In many installations ``aodh``
-will be correct.
diff --git a/doc/source/contributor/install/uwsgi.rst b/doc/source/contributor/install/uwsgi.rst
deleted file mode 100644
index 480bc23e..00000000
--- a/doc/source/contributor/install/uwsgi.rst
+++ /dev/null
@@ -1,62 +0,0 @@
-==============================
- Installing the API with uwsgi
-==============================
-
-Aodh comes with a few example files for configuring the API
-service to run with ``uwsgi``.
-
-app.wsgi
-========
-
-The file ``aodh/api/app.wsgi`` sets up the V2 API WSGI
-application. The file is installed with the rest of the Aodh
-application code, and should not need to be modified.
-
-Example of uwsgi configuration file
-===================================
-
-Create an aodh-uwsgi.ini file::
-
-    [uwsgi]
-    http = 0.0.0.0:8041
-    wsgi-file = /aodh/api/app.wsgi
-    plugins = python
-    # This is running standalone
-    master = true
-    # Set die-on-term & exit-on-reload so that uwsgi shuts down
-    exit-on-reload = true
-    die-on-term = true
-    # uwsgi recommends this to prevent thundering herd on accept.
-    thunder-lock = true
-    # Override the default size for headers from the 4k default
-    # (mainly for the Keystone token).
-    buffer-size = 65535
-    enable-threads = true
-    # Set the number of threads, usually to the value returned by
-    # the nproc command.
-    threads = 8
-    # Make sure the client doesn't try to re-use the connection.
-    add-header = Connection: close
-    # Set uid and gid to an appropriate user on your server. In many
-    # installations ``aodh`` will be correct.
-    uid = aodh
-    gid = aodh
-
-Then start the uwsgi server::
-
-    uwsgi ./aodh-uwsgi.ini
-
-Or start in background with::
-
-    uwsgi -d ./aodh-uwsgi.ini
-
-Configuring with uwsgi-plugin-python on Debian/Ubuntu
-=====================================================
-
-Install the Python plugin for uwsgi::
-
-    apt-get install uwsgi-plugin-python
-
-Run the server::
-
-    uwsgi_python --master --die-on-term --logto /var/log/aodh/aodh-api.log \
-        --http-socket :8042 --wsgi-file /usr/share/aodh-common/app.wsgi
diff --git a/doc/source/contributor/releasenotes/index.rst b/doc/source/contributor/releasenotes/index.rst
deleted file mode 100644
index 161309c4..00000000
--- a/doc/source/contributor/releasenotes/index.rst
+++ /dev/null
@@ -1,29 +0,0 @@
-..
-      Copyright (c) 2016 Hewlett Packard Enterprise Development Company, L.P.
-
-      Licensed under the Apache License, Version 2.0 (the "License"); you may
-      not use this file except in compliance with the License. You may obtain
-      a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-      License for the specific language governing permissions and limitations
-      under the License.
-
-============================
- Release Notes
-============================
-
-.. toctree::
-   :hidden:
-
-* `Liberty`_
-
-Since the Mitaka development cycle, release notes have been hosted at
-`Aodh Release Notes`_.
-
-.. _Liberty: https://wiki.openstack.org/wiki/ReleaseNotes/Liberty#OpenStack_Telemetry_.28Ceilometer.29
-.. _Aodh Release Notes: https://docs.openstack.org/releasenotes/aodh/
diff --git a/doc/source/contributor/testing.rst b/doc/source/contributor/testing.rst
deleted file mode 100644
index 1abb2da7..00000000
--- a/doc/source/contributor/testing.rst
+++ /dev/null
@@ -1,76 +0,0 @@
-..
-      Copyright 2012 New Dream Network, LLC (DreamHost)
-
-      Licensed under the Apache License, Version 2.0 (the "License"); you may
-      not use this file except in compliance with the License. You may obtain
-      a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-      License for the specific language governing permissions and limitations
-      under the License.
-
-=================
-Running the Tests
-=================
-
-Aodh includes an extensive set of automated unit tests which are
-run through tox_.
-
-1. Install ``tox``::
-
-   $ sudo pip install tox
-
-2. On Ubuntu install the ``libmysqlclient-dev`` package::
-
-   $ sudo apt-get install libmysqlclient-dev
-
-   For Fedora 20 there is no ``libmysqlclient-dev`` package, so you'll need
-   to install ``mariadb-devel.x86_64`` (or ``mariadb-devel.i386``) instead::
-
-   $ sudo yum install mariadb-devel.x86_64
-
-3. Run the unit and code-style tests::
-
-   $ cd /opt/stack/aodh
-   $ tox -e py27,pep8
-
-   As tox is a wrapper around testr, it also accepts the same flags as testr.
-   See the `testr documentation`_ for details about these additional flags.
-
-.. _testr documentation: https://testrepository.readthedocs.org/en/latest/MANUAL.html
-
-   Use a double hyphen to pass options to testr.
-   For example, to run only tests under tests/api/v2::
-
-      $ tox -e py27 -- api.v2
-
-   To debug tests (i.e. break into the pdb debugger), you can use the
-   ``debug`` tox environment. Here's an example, passing the name of a test,
-   since you'll normally only want to run the test that hits your
-   breakpoint::
-
-      $ tox -e debug aodh.tests.test_bin
-
-   For reference, the ``debug`` tox environment implements the instructions
-   here: https://wiki.openstack.org/wiki/Testr#Debugging_.28pdb.29_Tests
-
-4. There is a growing suite of tests which use a tool called `gabbi`_ to
-   test and validate the behavior of the Aodh API. These tests are run
-   when using the usual ``functional`` tox target but if desired they can be
-   run by themselves::
-
-      $ tox -e gabbi
-
-   The YAML files used to drive the gabbi tests can be found in
-   ``aodh/tests/functional/gabbi/gabbits``. If you are adding to or adjusting
-   the API you should consider adding tests here.
-
-.. _gabbi: https://gabbi.readthedocs.io/en/latest/
-
-.. seealso::
-
-   * tox_
-
-.. _tox: https://tox.readthedocs.io/en/latest/
diff --git a/doc/source/contributor/webapi/index.rst b/doc/source/contributor/webapi/index.rst
deleted file mode 100644
index a3a2c03a..00000000
--- a/doc/source/contributor/webapi/index.rst
+++ /dev/null
@@ -1,47 +0,0 @@
-=========
- Web API
-=========
-
-.. toctree::
-   :maxdepth: 2
-
-   v2
-
-You can get the list of API versions via a request to the endpoint root path.
-For example::
-
-    curl -H "X-AUTH-TOKEN: fa2ec18631f94039a5b9a8b4fe8f56ad" http://127.0.0.1:8042
-
-Sample response::
-
-    {
-        "versions": {
-            "values": [
-                {
-                    "id": "v2",
-                    "links": [
-                        {
-                            "href": "http://127.0.0.1:8042/v2",
-                            "rel": "self"
-                        },
-                        {
-                            "href": "http://docs.openstack.org/",
-                            "rel": "describedby",
-                            "type": "text/html"
-                        }
-                    ],
-                    "media-types": [
-                        {
-                            "base": "application/json",
-                            "type": "application/vnd.openstack.telemetry-v2+json"
-                        },
-                        {
-                            "base": "application/xml",
-                            "type": "application/vnd.openstack.telemetry-v2+xml"
-                        }
-                    ],
-                    "status": "stable",
-                    "updated": "2013-02-13T00:00:00Z"
-                }
-            ]
-        }
-    }
diff --git a/doc/source/contributor/webapi/v2.rst b/doc/source/contributor/webapi/v2.rst
deleted file mode 100644
index 6fffc6ee..00000000
--- a/doc/source/contributor/webapi/v2.rst
+++ /dev/null
@@ -1,105 +0,0 @@
-============
- V2 Web API
-============
-
-Capabilities
-============
-
-The Capabilities API allows you to directly discover which functions of the
-V2 API, including the selectable aggregate functions, are supported by the
-currently configured storage driver. A capabilities query returns a flattened
-dictionary of properties with associated boolean values; a 'False' or absent
-value means that the corresponding feature is not available in the backend.
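-
-For example, a capabilities query against a SQL-backed deployment might
-return something like the following (an illustrative sketch, not an
-exhaustive list of capability keys)::
-
-    {
-        "api": {
-            "alarms:query:simple": true,
-            "alarms:query:complex": true
-        },
-        "alarm_storage": {
-            "storage:production_ready": true
-        }
-    }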
-
-.. rest-controller:: aodh.api.controllers.v2.capabilities:CapabilitiesController
-   :webprefix: /v2/capabilities
-
-.. autotype:: aodh.api.controllers.v2.capabilities.Capabilities
-   :members:
-
-.. _alarms-api:
-
-Alarms
-======
-
-.. rest-controller:: aodh.api.controllers.v2.alarms:AlarmsController
-   :webprefix: /v2/alarms
-
-.. rest-controller:: aodh.api.controllers.v2.alarms:AlarmController
-   :webprefix: /v2/alarms
-
-.. autotype:: aodh.api.controllers.v2.alarms.Alarm
-   :members:
-
-.. autotype:: aodh.api.controllers.v2.alarm_rules.threshold.AlarmThresholdRule
-   :members:
-
-.. autotype:: aodh.api.controllers.v2.alarm_rules.gnocchi.MetricOfResourceRule
-   :members:
-
-.. autotype:: aodh.api.controllers.v2.alarm_rules.gnocchi.AggregationMetricByResourcesLookupRule
-   :members:
-
-.. autotype:: aodh.api.controllers.v2.alarm_rules.gnocchi.AggregationMetricsByIdLookupRule
-   :members:
-
-.. autotype:: aodh.api.controllers.v2.alarms.AlarmTimeConstraint
-   :members:
-
-.. autotype:: aodh.api.controllers.v2.alarms.AlarmChange
-   :members:
-
-Filtering Queries
-=================
-
-The filter expressions of the query feature operate on the fields of *Alarm*
-and *AlarmChange*. The following comparison operators are supported: *=*,
-*!=*, *<*, *<=*, *>*, *>=* and *in*; and the following logical operators can
-be used: *and*, *or* and *not*. The field names are validated against the
-database models.
-
-Complex Query supports defining the list of orderby expressions in the form
-of [{"field_name": "asc"}, {"field_name2": "desc"}, ...].
-
-The number of returned items can be bounded using the *limit* option.
-
-The *filter*, *orderby* and *limit* fields are all optional in a query.
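-
-For instance, the following request body asks for the ten most recently
-changed alarms of type "event" that are currently in the "alarm" state (an
-illustrative sketch; *filter* and *orderby* are assumed to be passed as
-JSON-encoded strings)::
-
-    {
-        "filter": "{\"and\": [{\"=\": {\"state\": \"alarm\"}}, {\"=\": {\"type\": \"event\"}}]}",
-        "orderby": "[{\"timestamp\": \"desc\"}]",
-        "limit": 10
-    }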
-
-.. rest-controller:: aodh.api.controllers.v2.query:QueryAlarmsController
-   :webprefix: /v2/query/alarms
-
-.. rest-controller:: aodh.api.controllers.v2.query:QueryAlarmHistoryController
-   :webprefix: /v2/query/alarms/history
-
-.. autotype:: aodh.api.controllers.v2.query.ComplexQuery
-   :members:
-
-Composite rule Alarm
-====================
-
-The *composite* type alarm allows users to specify a composite rule to define
-an alarm with multiple triggering conditions, using a combination of *and*
-and *or* relations. A composite rule is composed of multiple threshold rules
-or gnocchi rules. A sample composite alarm request form is as follows::
-
-    {
-        "name": "test_composite",
-        "type": "composite",
-        "composite_rule": {
-            "and": [THRESHOLD_RULE1, THRESHOLD_RULE2, {
-                "or": [THRESHOLD_RULE3, GNOCCHI_RULE1,
-                       GNOCCHI_RULE2, GNOCCHI_RULE3]
-            }]
-        }
-    }
-
-A sub-rule in composite_rule is the same as a threshold_rule in a threshold
-alarm or a gnocchi_rule in a gnocchi alarm. Additionally, it has a mandatory
-*type* field to specify the rule type, as in the following sample::
-
-    {
-        "threshold": 0.8,
-        "meter_name": "cpu_util",
-        "type": "threshold"
-    }
diff --git a/doc/source/glossary.rst b/doc/source/glossary.rst
deleted file mode 100644
index e73e40d1..00000000
--- a/doc/source/glossary.rst
+++ /dev/null
@@ -1,60 +0,0 @@
-..
-      Copyright 2012 New Dream Network (DreamHost)
-      Copyright 2013 eNovance
-
-      Licensed under the Apache License, Version 2.0 (the "License"); you may
-      not use this file except in compliance with the License. You may obtain
-      a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-      License for the specific language governing permissions and limitations
-      under the License.
-
-==========
- Glossary
-==========
-
-.. glossary::
-
-   alarm
-     An action triggered whenever a meter reaches a certain threshold.
-
-   API server
-     HTTP REST API service for Aodh.
-
-   ceilometer
-     From Wikipedia [#]_:
-
-         A ceilometer is a device that uses a laser or other light
-         source to determine the height of a cloud base.
-
-   http callback
-     An HTTP callback is used for calling a predefined URL whenever an
-     alarm has been set off. The payload of the request contains
-     all the details of why the alarm was triggered.
-
-   log
-     Logging is one of the alarm actions; useful mostly for debugging,
-     it stores the alarm in a log file.
-
-   zaqar
-     According to the `Zaqar Developer Documentation`_:
-
-         Zaqar is a multi-tenant cloud messaging and notification service
-         for web and mobile developers.
-
-   project
-     The OpenStack tenant or project.
-
-   resource
-     The OpenStack entity being metered (e.g. instance, volume, image, etc).
-
-   user
-     An OpenStack user.
-
-.. [#] http://en.wikipedia.org/wiki/Ceilometer
-.. _Zaqar Developer Documentation: https://docs.openstack.org/zaqar/latest/
diff --git a/doc/source/index.rst b/doc/source/index.rst
deleted file mode 100644
index 6d14b2b5..00000000
--- a/doc/source/index.rst
+++ /dev/null
@@ -1,35 +0,0 @@
-..
-      Copyright 2010 OpenStack Foundation
-      All Rights Reserved.
-
-      Licensed under the Apache License, Version 2.0 (the "License"); you may
-      not use this file except in compliance with the License. You may obtain
-      a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-      License for the specific language governing permissions and limitations
-      under the License.
-
-================================
-Welcome to Aodh's documentation!
-================================
-
-The Alarming service (aodh) provides the ability to trigger actions based on
-defined rules against metric or event data collected by Ceilometer or
-Gnocchi.
-
-.. toctree::
-   :maxdepth: 2
-
-   install/index
-   contributor/index
-   admin/index
-
-.. toctree::
-   :maxdepth: 1
-
-   glossary
diff --git a/doc/source/install/configure-common.rst b/doc/source/install/configure-common.rst
deleted file mode 100644
index aec40a1e..00000000
--- a/doc/source/install/configure-common.rst
+++ /dev/null
@@ -1,75 +0,0 @@
-2. Edit the ``/etc/aodh/aodh.conf`` file and complete the following actions:
-
-   * In the ``[database]`` section, configure database access:
-
-     .. code-block:: ini
-
-        [database]
-        ...
-        connection = mysql+pymysql://aodh:AODH_DBPASS@controller/aodh
-
-     Replace ``AODH_DBPASS`` with the password you chose for the
-     Telemetry Alarming module database. You must escape special characters
-     such as ``:``, ``/``, ``+``, and ``@`` in the connection string in
-     accordance with `RFC2396 `_.
-
-   * In the ``[DEFAULT]`` section,
-     configure ``RabbitMQ`` message queue access:
-
-     .. code-block:: ini
-
-        [DEFAULT]
-        ...
-        transport_url = rabbit://openstack:RABBIT_PASS@controller
-
-     Replace ``RABBIT_PASS`` with the password you chose for the
-     ``openstack`` account in ``RabbitMQ``.
-
-   * In the ``[DEFAULT]`` and ``[keystone_authtoken]`` sections,
-     configure Identity service access:
-
-     .. code-block:: ini
-
-        [DEFAULT]
-        ...
-        auth_strategy = keystone
-
-        [keystone_authtoken]
-        ...
-        auth_uri = http://controller:5000
-        auth_url = http://controller:35357
-        memcached_servers = controller:11211
-        auth_type = password
-        project_domain_id = default
-        user_domain_id = default
-        project_name = service
-        username = aodh
-        password = AODH_PASS
-
-     Replace ``AODH_PASS`` with the password you chose for
-     the ``aodh`` user in the Identity service.
-
-   * In the ``[service_credentials]`` section, configure service credentials:
-
-     .. code-block:: ini
-
-        [service_credentials]
-        ...
-        auth_type = password
-        auth_url = http://controller:5000/v3
-        project_domain_id = default
-        user_domain_id = default
-        project_name = service
-        username = aodh
-        password = AODH_PASS
-        interface = internalURL
-        region_name = RegionOne
-
-     Replace ``AODH_PASS`` with the password you chose for
-     the ``aodh`` user in the Identity service.
-
-.. todo:
-
-   Workaround for https://bugs.launchpad.net/ubuntu/+source/aodh/+bug/1513599.
-
-3. To initialize the database, run the ``aodh-dbsync`` script.
diff --git a/doc/source/install/get_started.rst b/doc/source/install/get_started.rst
deleted file mode 100644
index 6361d1bd..00000000
--- a/doc/source/install/get_started.rst
+++ /dev/null
@@ -1,28 +0,0 @@
-===================================
-Telemetry Alarming service overview
-===================================
-
-The Telemetry Alarming service triggers alarms when the collected metering
-or event data breaks the defined rules.
-
-The Telemetry Alarming service consists of the following components:
-
-An API server (``aodh-api``)
-  Runs on one or more central management servers to provide access
-  to the alarm information stored in the data store.
-
-An alarm evaluator (``aodh-evaluator``)
-  Runs on one or more central management servers to determine when
-  alarms fire due to the associated statistic trend crossing a
-  threshold over a sliding time window.
-
-A notification listener (``aodh-listener``)
-  Runs on a central management server and determines when to fire alarms.
-  The alarms are generated based on defined rules against events, which are
-  captured by the Telemetry Data Collection service's notification agents.
-
-An alarm notifier (``aodh-notifier``)
-  Runs on one or more central management servers to allow alarms to be
-  set based on the threshold evaluation for a collection of samples.
-
-These services communicate by using the OpenStack messaging bus.
diff --git a/doc/source/install/index.rst b/doc/source/install/index.rst
deleted file mode 100644
index 6283655a..00000000
--- a/doc/source/install/index.rst
+++ /dev/null
@@ -1,15 +0,0 @@
-==========================
-Telemetry Alarming service
-==========================
-
-.. toctree::
-
-   get_started.rst
-   install-obs.rst
-   install-rdo.rst
-   install-ubuntu.rst
-   next-steps.rst
-.. verify.rst
-
-This chapter assumes a working setup of OpenStack following the
-`OpenStack Installation Tutorials and Guides `_.
diff --git a/doc/source/install/install-obs.rst b/doc/source/install/install-obs.rst
deleted file mode 100644
index 17a54680..00000000
--- a/doc/source/install/install-obs.rst
+++ /dev/null
@@ -1,51 +0,0 @@
-.. _install-obs:
-
-Install and configure for openSUSE and SUSE Linux Enterprise
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-This section describes how to install and configure the
-Telemetry Alarming service, code-named aodh, on the controller node.
-
-This section assumes that you already have a working OpenStack
-environment with at least the following components installed:
-Compute, Image Service, Identity.
-
-.. include:: prereq-common.rst
-
-Install and configure components
---------------------------------
-
-.. note::
-
-   Default configuration files vary by distribution. You might need to add
-   these sections and options rather than modifying existing sections and
-   options. Also, an ellipsis (...) in the configuration snippets indicates
-   potential default configuration options that you should retain.
-
-1. Install the packages:
-
-   .. 
code-block:: console - - # zypper install openstack-aodh-api \ - openstack-aodh-evaluator openstack-aodh-notifier \ - openstack-aodh-listener openstack-aodh-expirer \ - python-aodhclient - -.. include:: configure-common.rst - -Finalize installation ---------------------- - -#. Start the Telemetry Alarming services and configure them to start - when the system boots: - - .. code-block:: console - - # systemctl enable openstack-aodh-api.service \ - openstack-aodh-evaluator.service \ - openstack-aodh-notifier.service \ - openstack-aodh-listener.service - # systemctl start openstack-aodh-api.service \ - openstack-aodh-evaluator.service \ - openstack-aodh-notifier.service \ - openstack-aodh-listener.service diff --git a/doc/source/install/install-rdo.rst b/doc/source/install/install-rdo.rst deleted file mode 100644 index beba7169..00000000 --- a/doc/source/install/install-rdo.rst +++ /dev/null @@ -1,51 +0,0 @@ -.. _install-rdo: - -Install and configure for Red Hat Enterprise Linux and CentOS -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -This section describes how to install and configure the -Telemetry Alarming service, code-named aodh, on the controller node. - -This section assumes that you already have a working OpenStack -environment with at least the following components installed: -Compute, Image Service, Identity. - -.. include:: prereq-common.rst - -Install and configure components --------------------------------- - -.. note:: - - Default configuration files vary by distribution. You might need to add - these sections and options rather than modifying existing sections and - options. Also, an ellipsis (...) in the configuration snippets indicates - potential default configuration options that you should retain. - -1. Install the packages: - - .. code-block:: console - - # yum install openstack-aodh-api \ - openstack-aodh-evaluator openstack-aodh-notifier \ - openstack-aodh-listener openstack-aodh-expirer \ - python-aodhclient - -.. include:: configure-common.rst - -Finalize installation ---------------------- - -#. Start the Telemetry Alarming services and configure them to start - when the system boots: - - .. code-block:: console - - # systemctl enable openstack-aodh-api.service \ - openstack-aodh-evaluator.service \ - openstack-aodh-notifier.service \ - openstack-aodh-listener.service - # systemctl start openstack-aodh-api.service \ - openstack-aodh-evaluator.service \ - openstack-aodh-notifier.service \ - openstack-aodh-listener.service diff --git a/doc/source/install/install-ubuntu.rst b/doc/source/install/install-ubuntu.rst deleted file mode 100644 index 3bc3a1dd..00000000 --- a/doc/source/install/install-ubuntu.rst +++ /dev/null @@ -1,44 +0,0 @@ -.. _install-ubuntu: - -Install and configure for Ubuntu -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -This section describes how to install and configure the -Telemetry Alarming service, code-named aodh, on the controller node. - -This section assumes that you already have a working OpenStack -environment with at least the following components installed: -Compute, Image Service, Identity. - -.. include:: prereq-common.rst - -Install and configure components --------------------------------- - -.. note:: - - Default configuration files vary by distribution. You might need to add - these sections and options rather than modifying existing sections and - options. Also, an ellipsis (...) in the configuration snippets indicates - potential default configuration options that you should retain. - -1. Install the packages: - - .. 
code-block:: console - - # apt-get install aodh-api aodh-evaluator aodh-notifier \ - aodh-listener aodh-expirer python-aodhclient - -.. include:: configure-common.rst - -Finalize installation ---------------------- - -#. Restart the Alarming services: - - .. code-block:: console - - # service aodh-api restart - # service aodh-evaluator restart - # service aodh-notifier restart - # service aodh-listener restart diff --git a/doc/source/install/next-steps.rst b/doc/source/install/next-steps.rst deleted file mode 100644 index 3f655303..00000000 --- a/doc/source/install/next-steps.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. _next-steps: - -Next steps -~~~~~~~~~~ - -Your OpenStack environment now includes the aodh service. - -To add additional services, see the -`OpenStack Installation Tutorials and Guides `_ diff --git a/doc/source/install/prereq-common.rst b/doc/source/install/prereq-common.rst deleted file mode 100644 index 47ecf557..00000000 --- a/doc/source/install/prereq-common.rst +++ /dev/null @@ -1,139 +0,0 @@ -Prerequisites -------------- - -Before you install and configure the Telemetry service, you must create a -database, service credentials, and API endpoints. - -#. To create the database, complete these steps: - - * Use the database access client to connect to - the database server as the ``root`` user: - - .. code-block:: console - - $ mysql -u root -p - - * Create the ``aodh`` database: - - .. code-block:: console - - CREATE DATABASE aodh; - - * Grant proper access to the ``aodh`` database: - - .. code-block:: console - - GRANT ALL PRIVILEGES ON aodh.* TO 'aodh'@'localhost' \ - IDENTIFIED BY 'AODH_DBPASS'; - GRANT ALL PRIVILEGES ON aodh.* TO 'aodh'@'%' \ - IDENTIFIED BY 'AODH_DBPASS'; - - Replace ``AODH_DBPASS`` with a suitable password. - - * Exit the database access client. - -#. Source the ``admin`` credentials to gain access to admin-only - CLI commands: - - .. code-block:: console - - $ . admin-openrc - -#. To create the service credentials, complete these steps: - - * Create the ``aodh`` user: - - .. code-block:: console - - $ openstack user create --domain default \ - --password-prompt aodh - User Password: - Repeat User Password: - +---------------------+----------------------------------+ - | Field | Value | - +---------------------+----------------------------------+ - | domain_id | default | - | enabled | True | - | id | b7657c9ea07a4556aef5d34cf70713a3 | - | name | aodh | - | options | {} | - | password_expires_at | None | - +---------------------+----------------------------------+ - - * Add the ``admin`` role to the ``aodh`` user: - - .. code-block:: console - - $ openstack role add --project service --user aodh admin - - .. note:: - - This command provides no output. - - * Create the ``aodh`` service entity: - - .. code-block:: console - - $ openstack service create --name aodh \ - --description "Telemetry" alarming - +-------------+----------------------------------+ - | Field | Value | - +-------------+----------------------------------+ - | description | Telemetry | - | enabled | True | - | id | 3405453b14da441ebb258edfeba96d83 | - | name | aodh | - | type | alarming | - +-------------+----------------------------------+ - -#. Create the Alarming service API endpoints: - - .. 
code-block:: console - - $ openstack endpoint create --region RegionOne \ - alarming public http://controller:8042 - +--------------+----------------------------------+ - | Field | Value | - +--------------+----------------------------------+ - | enabled | True | - | id | 340be3625e9b4239a6415d034e98aace | - | interface | public | - | region | RegionOne | - | region_id | RegionOne | - | service_id | 8c2c7f1b9b5049ea9e63757b5533e6d2 | - | service_name | aodh | - | service_type | alarming | - | url | http://controller:8042 | - +--------------+----------------------------------+ - - $ openstack endpoint create --region RegionOne \ - alarming internal http://controller:8042 - +--------------+----------------------------------+ - | Field | Value | - +--------------+----------------------------------+ - | enabled | True | - | id | 340be3625e9b4239a6415d034e98aace | - | interface | internal | - | region | RegionOne | - | region_id | RegionOne | - | service_id | 8c2c7f1b9b5049ea9e63757b5533e6d2 | - | service_name | aodh | - | service_type | alarming | - | url | http://controller:8042 | - +--------------+----------------------------------+ - - $ openstack endpoint create --region RegionOne \ - alarming admin http://controller:8042 - +--------------+----------------------------------+ - | Field | Value | - +--------------+----------------------------------+ - | enabled | True | - | id | 340be3625e9b4239a6415d034e98aace | - | interface | admin | - | region | RegionOne | - | region_id | RegionOne | - | service_id | 8c2c7f1b9b5049ea9e63757b5533e6d2 | - | service_name | aodh | - | service_type | alarming | - | url | http://controller:8042 | - +--------------+----------------------------------+ diff --git a/doc/source/install/verify.rst b/doc/source/install/verify.rst deleted file mode 100644 index 6e4f58e9..00000000 --- a/doc/source/install/verify.rst +++ /dev/null @@ -1,10 +0,0 @@ -:orphan: - -.. _verify: - -Verify operation -~~~~~~~~~~~~~~~~ - -Verify operation of the Telemetry Alarming service. - -TBD diff --git a/rally-jobs/README.rst b/rally-jobs/README.rst deleted file mode 100644 index 615fbd67..00000000 --- a/rally-jobs/README.rst +++ /dev/null @@ -1,29 +0,0 @@ -Rally job related files -======================= - -This directory contains rally tasks and plugins that are run by OpenStack CI. - -Structure ---------- - -* plugins - directory where you can add rally plugins. Almost everything in - Rally is a plugin. Benchmark context, Benchmark scenario, SLA checks, Generic - cleanup resources, .... - -* extra - all files from this directory will be copy pasted to gates, so you - are able to use absolute paths in rally tasks. 
- Files will be located in ~/.rally/extra/* - -* aodh is a task that is run in gates against aodh - - -Useful links ------------- - -* More about Rally: https://rally.readthedocs.org/en/latest/ - -* How to add rally-gates: https://rally.readthedocs.io/en/latest/quick_start/gates.html - -* About plugins: https://rally.readthedocs.io/en/latest/plugins/index.html - -* Plugin samples: https://github.com/openstack/rally/tree/master/samples/plugins diff --git a/rally-jobs/ceilometer.yaml b/rally-jobs/ceilometer.yaml deleted file mode 100644 index 059a8a69..00000000 --- a/rally-jobs/ceilometer.yaml +++ /dev/null @@ -1,213 +0,0 @@ ---- - - CeilometerAlarms.create_alarm: - - - args: - meter_name: "ram_util" - threshold: 10.0 - type: "threshold" - statistic: "avg" - alarm_actions: ["http://localhost:8776/alarm"] - ok_actions: ["http://localhost:8776/ok"] - insufficient_data_actions: ["http://localhost:8776/notok"] - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - max_failure_percent: 0 - - CeilometerAlarms.create_and_delete_alarm: - - - args: - meter_name: "ram_util" - threshold: 10.0 - type: "threshold" - statistic: "avg" - alarm_actions: ["http://localhost:8776/alarm"] - ok_actions: ["http://localhost:8776/ok"] - insufficient_data_actions: ["http://localhost:8776/notok"] - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - max_failure_percent: 0 - - CeilometerAlarms.create_and_list_alarm: - - - args: - meter_name: "ram_util" - threshold: 10.0 - type: "threshold" - statistic: "avg" - alarm_actions: ["http://localhost:8776/alarm"] - ok_actions: ["http://localhost:8776/ok"] - insufficient_data_actions: ["http://localhost:8776/notok"] - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - max_failure_percent: 0 - - CeilometerAlarms.create_and_update_alarm: - - - args: - meter_name: "ram_util" - threshold: 10.0 - type: "threshold" - statistic: "avg" - alarm_actions: ["http://localhost:8776/alarm"] - ok_actions: ["http://localhost:8776/ok"] - insufficient_data_actions: ["http://localhost:8776/notok"] - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - max_failure_percent: 0 - - CeilometerAlarms.list_alarms: - - - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - max_failure_percent: 0 - - CeilometerMeters.list_meters: - - - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - max_failure_percent: 0 - - CeilometerResource.list_resources: - - - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - max_failure_percent: 0 - - CeilometerStats.create_meter_and_get_stats: - - - args: - user_id: "user-id" - resource_id: "resource-id" - counter_volume: 1.0 - counter_unit: "" - counter_type: "cumulative" - runner: - type: "constant" - times: 20 - concurrency: 10 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - max_failure_percent: 0 - - CeilometerQueries.create_and_query_alarms: - - - args: - filter: {"and": [{"!=": {"state": "dummy_state"}},{"=": {"type": "threshold"}}]} - orderby: !!null - limit: 10 - meter_name: "ram_util" - threshold: 10.0 - type: "threshold" - statistic: "avg" - alarm_actions: 
["http://localhost:8776/alarm"] - ok_actions: ["http://localhost:8776/ok"] - insufficient_data_actions: ["http://localhost:8776/notok"] - runner: - type: "constant" - times: 20 - concurrency: 10 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - max_failure_percent: 0 - - CeilometerQueries.create_and_query_alarm_history: - - - args: - orderby: !!null - limit: !!null - meter_name: "ram_util" - threshold: 10.0 - type: "threshold" - statistic: "avg" - alarm_actions: ["http://localhost:8776/alarm"] - ok_actions: ["http://localhost:8776/ok"] - insufficient_data_actions: ["http://localhost:8776/notok"] - runner: - type: "constant" - times: 20 - concurrency: 10 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - max_failure_percent: 0 - - CeilometerQueries.create_and_query_samples: - - - args: - filter: {"=": {"counter_unit": "instance"}} - orderby: !!null - limit: 10 - counter_name: "cpu_util" - counter_type: "gauge" - counter_unit: "instance" - counter_volume: "1.0" - resource_id: "resource_id" - runner: - type: "constant" - times: 20 - concurrency: 10 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - max_failure_percent: 0 - diff --git a/rally-jobs/extra/README.rst b/rally-jobs/extra/README.rst deleted file mode 100644 index aab343c5..00000000 --- a/rally-jobs/extra/README.rst +++ /dev/null @@ -1,6 +0,0 @@ -Extra files -=========== - -All files from this directory will be copy pasted to gates, so you are able to -use absolute path in rally tasks. Files will be in ~/.rally/extra/* - diff --git a/rally-jobs/extra/fake.img b/rally-jobs/extra/fake.img deleted file mode 100644 index e69de29b..00000000 diff --git a/rally-jobs/plugins/README.rst b/rally-jobs/plugins/README.rst deleted file mode 100644 index 33bec0d2..00000000 --- a/rally-jobs/plugins/README.rst +++ /dev/null @@ -1,9 +0,0 @@ -Rally plugins -============= - -All *.py modules from this directory will be auto-loaded by Rally and all -plugins will be discoverable. There is no need of any extra configuration -and there is no difference between writing them here and in rally code base. - -Note that it is better to push all interesting and useful benchmarks to Rally -code base, this simplifies administration for Operators. diff --git a/rally-jobs/plugins/plugin_sample.py b/rally-jobs/plugins/plugin_sample.py deleted file mode 100644 index 753300b6..00000000 --- a/rally-jobs/plugins/plugin_sample.py +++ /dev/null @@ -1,27 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" Sample of plugin for Aodh. 
-
-For more Aodh-related benchmarks, take a look here:
-github.com/openstack/rally/blob/master/rally/benchmark/scenarios/aodh/
-
-About plugins: https://rally.readthedocs.io/en/latest/plugins/index.html
-
-Rally concepts: https://wiki.openstack.org/wiki/Rally/Concepts
-"""
-
-from rally.benchmark.scenarios import base
-
-
-class AodhPlugin(base.Scenario):
-    pass
diff --git a/releasenotes/notes/.placeholder b/releasenotes/notes/.placeholder
deleted file mode 100644
index e69de29b..00000000
diff --git a/releasenotes/notes/Add-state-reason-to-the-API-7bc5a9465466db2b.yaml b/releasenotes/notes/Add-state-reason-to-the-API-7bc5a9465466db2b.yaml
deleted file mode 100644
index bda07b7c..00000000
--- a/releasenotes/notes/Add-state-reason-to-the-API-7bc5a9465466db2b.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-features:
-  - |
-    The reason for the state change is now part of the API as the
-    "state_reason" field of the alarm object.
diff --git a/releasenotes/notes/add-a-data-migration-tool-daa14b0cb5d4cc62.yaml b/releasenotes/notes/add-a-data-migration-tool-daa14b0cb5d4cc62.yaml
deleted file mode 100644
index 96af983b..00000000
--- a/releasenotes/notes/add-a-data-migration-tool-daa14b0cb5d4cc62.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-upgrade:
-  - >
-    Add a tool for migrating alarm and alarm history data from NoSQL storage
-    to SQL storage. The migration tool has been tested in a devstack
-    environment, but users should still be cautious, because data migration
-    between storage backends is inherently risky.
diff --git a/releasenotes/notes/bug1540395-reason-string-0aad56966007d0e3.yaml b/releasenotes/notes/bug1540395-reason-string-0aad56966007d0e3.yaml
deleted file mode 100644
index 11980f70..00000000
--- a/releasenotes/notes/bug1540395-reason-string-0aad56966007d0e3.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-fixes:
-  - >
-    [`bug 1540395 `_]
-    Fix the reason string to properly handle transitions when one sample is
-    outside of the defined threshold.
diff --git a/releasenotes/notes/composite-alarm-1b1ca9ea0e8f55c8.yaml b/releasenotes/notes/composite-alarm-1b1ca9ea0e8f55c8.yaml
deleted file mode 100644
index 242107a0..00000000
--- a/releasenotes/notes/composite-alarm-1b1ca9ea0e8f55c8.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-features:
-  - >
    Add a new composite alarm type, which allows users to specify a composite
-    rule to define an alarm with multiple triggering conditions, using a
-    combination of *and* and *or* relationships. The composite rule is
-    composed of multiple threshold rules or gnocchi rules.
diff --git a/releasenotes/notes/deprecate-combination-alarms-7ff26b73b61a0e59.yaml b/releasenotes/notes/deprecate-combination-alarms-7ff26b73b61a0e59.yaml
deleted file mode 100644
index 61cc34e7..00000000
--- a/releasenotes/notes/deprecate-combination-alarms-7ff26b73b61a0e59.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-deprecations:
-  - Combination alarms are officially deprecated and disabled by default.
-    Set api.enable_combination_alarms to True to enable them.
-    Existing alarms will still be evaluated, but access to them via the API
-    is linked to whether that configuration option is turned on or off.
-    It is advised to use composite alarms instead.
diff --git a/releasenotes/notes/deprecate-nosql-backends-13079883eec7e8e5.yaml b/releasenotes/notes/deprecate-nosql-backends-13079883eec7e8e5.yaml
deleted file mode 100644
index e7e3c45a..00000000
--- a/releasenotes/notes/deprecate-nosql-backends-13079883eec7e8e5.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-deprecations:
-  - >
-    Drop support for NoSQL backends in Aodh.
-    SQL is a perfectly sufficient
-    backend for handling the scope of alarms. To maximise available
-    resources, NoSQL backends are deprecated so developers do not need to
-    worry about adding features to multiple backends.
diff --git a/releasenotes/notes/deprecate-threshold-alarm-d89da351d4f6f50f.yaml b/releasenotes/notes/deprecate-threshold-alarm-d89da351d4f6f50f.yaml
deleted file mode 100644
index 3d4f5465..00000000
--- a/releasenotes/notes/deprecate-threshold-alarm-d89da351d4f6f50f.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-deprecations:
-  - |
-    Ceilometer's API is deprecated in Ocata. Therefore, threshold alarms are
-    now deprecated as well. Threshold rules will be removed when Ceilometer's
-    API is also removed. Similar functionality is provided through Gnocchi
-    alarm rules: ``gnocchi_resources_threshold``,
-    ``gnocchi_aggregation_by_metrics_threshold``, or
-    ``gnocchi_aggregation_by_resources_threshold``.
diff --git a/releasenotes/notes/enable-aodh-service-multi-processes-67ed9a0b7fac69aa.yaml b/releasenotes/notes/enable-aodh-service-multi-processes-67ed9a0b7fac69aa.yaml
deleted file mode 100644
index 0938ad3e..00000000
--- a/releasenotes/notes/enable-aodh-service-multi-processes-67ed9a0b7fac69aa.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-features:
-  - Enable aodh services, including aodh-evaluator, aodh-listener and
-    aodh-notifier, to run in multiple-worker mode.
-  - New options are introduced correspondingly as [evaluator]workers,
-    [listener]workers and [notifier]workers. They all default to 1.
diff --git a/releasenotes/notes/event-listener-batch-support-04e6ff159ef34d8c.yaml b/releasenotes/notes/event-listener-batch-support-04e6ff159ef34d8c.yaml
deleted file mode 100644
index 1acaf4be..00000000
--- a/releasenotes/notes/event-listener-batch-support-04e6ff159ef34d8c.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-features:
-  - >
-    Add support for batch processing of messages from the queue. This will
-    allow the aodh-listener to grab multiple event messages per thread to
-    enable more efficient processing.
-upgrade:
-  - >
-    batch_size and batch_timeout configuration options are added to the
-    [listener] section of the configuration. The batch_size controls the
-    number of messages to grab before processing. Similarly, the
-    batch_timeout defines the wait time before processing.
diff --git a/releasenotes/notes/fix-ceilometerclient-init-8bc7a6742937c3e2.yaml b/releasenotes/notes/fix-ceilometerclient-init-8bc7a6742937c3e2.yaml
deleted file mode 100644
index a39a788a..00000000
--- a/releasenotes/notes/fix-ceilometerclient-init-8bc7a6742937c3e2.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-fixes:
-  - >
-    [`bug 1518447 `_]
-    Fix to ensure ceilometerclient is properly initialised on startup.
diff --git a/releasenotes/notes/fix-combination-alarms-8097adf08b837a50.yaml b/releasenotes/notes/fix-combination-alarms-8097adf08b837a50.yaml
deleted file mode 100644
index 4eccf04a..00000000
--- a/releasenotes/notes/fix-combination-alarms-8097adf08b837a50.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-fixes:
-  - >
-    [`bug 1511252 `_]
-    Fix an issue with combination alarms where evaluation failed to consider
-    all alarms in the chain.
diff --git a/releasenotes/notes/fix-empty-statistics-3852da99b1c0b297.yaml b/releasenotes/notes/fix-empty-statistics-3852da99b1c0b297.yaml
deleted file mode 100644
index ed4df619..00000000
--- a/releasenotes/notes/fix-empty-statistics-3852da99b1c0b297.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-fixes:
-  - >
-    [`bug 1539069 `_]
-    Fix to handle the scenario where no valid statistics exist for the
-    specified period.
diff --git a/releasenotes/notes/fix-gnocchi-aggregation-eval-7c2c1c67bdf2d11c.yaml b/releasenotes/notes/fix-gnocchi-aggregation-eval-7c2c1c67bdf2d11c.yaml
deleted file mode 100644
index 76a7de0b..00000000
--- a/releasenotes/notes/fix-gnocchi-aggregation-eval-7c2c1c67bdf2d11c.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-fixes:
-  - >
-    [`bug 1513738 `_]
-    Fix an issue where alarms using Gnocchi aggregations were not being
-    evaluated.
diff --git a/releasenotes/notes/fix-rbac-50825144e0897d7d.yaml b/releasenotes/notes/fix-rbac-50825144e0897d7d.yaml
deleted file mode 100644
index 44a7c6aa..00000000
--- a/releasenotes/notes/fix-rbac-50825144e0897d7d.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-upgrade:
-  - >
-    A new default policy.json is provided to properly handle RBAC control.
-    Existing policy.json files may not grant the appropriate access.
-security:
-  - >
-    A patch was added to address inconsistent RBAC policy handling.
-    Certain rules may not have been given appropriate access.
-fixes:
-  - >
-    [`bug 1504495 `_]
-    A patch was added to address inconsistent RBAC policy handling.
-    Certain rules may not have been given appropriate access.
diff --git a/releasenotes/notes/fix-ssl-request-8107616b6a85a217.yaml b/releasenotes/notes/fix-ssl-request-8107616b6a85a217.yaml
deleted file mode 100644
index 504df12b..00000000
--- a/releasenotes/notes/fix-ssl-request-8107616b6a85a217.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-fixes:
-  - >
-    [`bug 1582131 `_]
-    Fix an issue with passing a CA_BUNDLE certificate path as the value of
-    the "verify" parameter in SSL requests.
-
-features:
-  - >
-    A new option, "rest_notifier_ca_bundle_certificate_path", has been added
-    to the configuration file, with None as the default value. If this
-    option is set and SSL is used for the alarm action, the certificate path
-    provided will be used as the value of the verify parameter in the action
-    request.
diff --git a/releasenotes/notes/gnocchi-capability-cache-75d011e77b8ecc72.yaml b/releasenotes/notes/gnocchi-capability-cache-75d011e77b8ecc72.yaml
deleted file mode 100644
index 5bc4ccca..00000000
--- a/releasenotes/notes/gnocchi-capability-cache-75d011e77b8ecc72.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-other:
-  - |
-    Gnocchi aggregation capabilities are now cached to minimise redundant
-    calls to Gnocchi when validating aggregation methods. The cache is stored
-    in-memory for an hour. If additional aggregations are added to Gnocchi,
-    they will not be propagated to Aodh's API service for up to an hour,
-    unless the service is restarted.
diff --git a/releasenotes/notes/gnocchi-client-a62ca5a0c717807e.yaml b/releasenotes/notes/gnocchi-client-a62ca5a0c717807e.yaml
deleted file mode 100644
index 01774a90..00000000
--- a/releasenotes/notes/gnocchi-client-a62ca5a0c717807e.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-features:
-  - >
-    The Gnocchi dispatcher now uses the client rather than direct HTTP
-    requests.
-upgrade:
-  - >
-    The gnocchiclient library is now a requirement if using
-    ceilometer+gnocchi.
diff --git a/releasenotes/notes/gnocchi-external-resource-owner-3fad253d30746b0d.yaml b/releasenotes/notes/gnocchi-external-resource-owner-3fad253d30746b0d.yaml
deleted file mode 100644
index c6fb419e..00000000
--- a/releasenotes/notes/gnocchi-external-resource-owner-3fad253d30746b0d.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-fixes:
-  - |
-    When an unprivileged user wants to access Gnocchi resources created by
-    Ceilometer, the request fails because the filter scopes the Gnocchi
-    query to resources owned by the user.
To fix this, we introduce a new configuration option - "gnocchi_external_project_owner", set by default to "service". The new - filter now allows two kinds of Gnocchi resources: - - * owned by the user project - * owned by "gnocchi_external_project_owner", where the original project_id of - the resource is the user project. diff --git a/releasenotes/notes/healthcheck-560700b72ae68e18.yaml b/releasenotes/notes/healthcheck-560700b72ae68e18.yaml deleted file mode 100644 index 5e28af9c..00000000 --- a/releasenotes/notes/healthcheck-560700b72ae68e18.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - A healthcheck endpoint is provided by default at /healthcheck. It leverages - the oslo_middleware healthcheck middleware. It allows retrieving information - about the health of the API service. diff --git a/releasenotes/notes/ingestion-lag-2317725887287fbc.yaml b/releasenotes/notes/ingestion-lag-2317725887287fbc.yaml deleted file mode 100644 index c1aef958..00000000 --- a/releasenotes/notes/ingestion-lag-2317725887287fbc.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -features: - - Allow extending the alarm evaluation window to compensate for the - reporting/ingestion lag. - - A new option, additional_ingestion_lag, is introduced, defaulting to 0. - It represents the number of seconds by which the window is extended. diff --git a/releasenotes/notes/keystone-v3-support-ffc0f804dbe9d7e9.yaml b/releasenotes/notes/keystone-v3-support-ffc0f804dbe9d7e9.yaml deleted file mode 100644 index 87225fad..00000000 --- a/releasenotes/notes/keystone-v3-support-ffc0f804dbe9d7e9.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - > - Add support for Keystone v3 authentication diff --git a/releasenotes/notes/mysql-precise-datetime-e374c77e6707985e.yaml b/releasenotes/notes/mysql-precise-datetime-e374c77e6707985e.yaml deleted file mode 100644 index 0f3aa538..00000000 --- a/releasenotes/notes/mysql-precise-datetime-e374c77e6707985e.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -other: - - Aodh now leverages microsecond timestamps available since MySQL 5.6.4, - which is therefore now the minimum required version of MySQL. diff --git a/releasenotes/notes/notifier-batch-listener-01796e2cb06344dd.yaml b/releasenotes/notes/notifier-batch-listener-01796e2cb06344dd.yaml deleted file mode 100644 index dd9a20e6..00000000 --- a/releasenotes/notes/notifier-batch-listener-01796e2cb06344dd.yaml +++ /dev/null @@ -1,12 +0,0 @@ ---- -features: - - > - Add support for batch processing of messages from the queue. This will allow - the aodh-notifier to grab multiple messages per thread to enable more - efficient processing. -upgrade: - - > - batch_size and batch_timeout configuration options are added to the [notifier] - section of the configuration. The batch_size controls the number of messages to - grab before processing. Similarly, the batch_timeout defines the wait time - before processing. diff --git a/releasenotes/notes/partition-coordinator-improvement-ff1c257f69f120ac.yaml b/releasenotes/notes/partition-coordinator-improvement-ff1c257f69f120ac.yaml deleted file mode 100644 index d87e0f6c..00000000 --- a/releasenotes/notes/partition-coordinator-improvement-ff1c257f69f120ac.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -fixes: - - > - [`bug 1575530 `_] - A patch was added to fix and improve the partition coordinator, making sure - the input tasks can be correctly distributed to partition members.
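Pulling the options from the notes above into one hedged aodh.conf sketch: the option names and the [notifier] section come from the notes, while the [api] placement of gnocchi_external_project_owner and the [DEFAULT] placement of additional_ingestion_lag are assumptions, and all values are illustrative::

    [api]
    # project whose Gnocchi resources remain visible to unprivileged users,
    # per the gnocchi-external-resource-owner fix above
    gnocchi_external_project_owner = service

    [DEFAULT]
    # extend the alarm evaluation window by 30 seconds to compensate for
    # reporting/ingestion lag (defaults to 0, i.e. no extension)
    additional_ingestion_lag = 30

    [notifier]
    # messages to grab before processing, and the wait time before an
    # incomplete batch is processed anyway
    batch_size = 10
    batch_timeout = 5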
diff --git a/releasenotes/notes/pecan-debug-removed-7c7a528a1aea98bf.yaml b/releasenotes/notes/pecan-debug-removed-7c7a528a1aea98bf.yaml deleted file mode 100644 index 2732bbb8..00000000 --- a/releasenotes/notes/pecan-debug-removed-7c7a528a1aea98bf.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -upgrade: - - The api.pecan_debug option has been removed. diff --git a/releasenotes/notes/queue-communication-1b884feab4078dde.yaml b/releasenotes/notes/queue-communication-1b884feab4078dde.yaml deleted file mode 100644 index ad0479a2..00000000 --- a/releasenotes/notes/queue-communication-1b884feab4078dde.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- -features: - - > - Support for queue based communication between the alarm evaluator service and - alarm notifier services was added. The original implementation used - RPC, but there is significant overhead involved with using RPC. Work queues - provide the required functionality with better performance. -upgrade: - - > - Queue based communication is the new default IPC protocol. RPC can still - be used by choosing rpc as the ipc_protocol option. Only one protocol can be - run at any given time. -deprecations: - - > - Because queues provide the equivalent functionality, RPC support is - deprecated and will be removed after Mitaka. diff --git a/releasenotes/notes/remove-alarm-name-unique-constraint-4fb0b14f3ad46f0b.yaml b/releasenotes/notes/remove-alarm-name-unique-constraint-4fb0b14f3ad46f0b.yaml deleted file mode 100644 index 1cc14e4c..00000000 --- a/releasenotes/notes/remove-alarm-name-unique-constraint-4fb0b14f3ad46f0b.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -other: - - The alarm name unique constraint for each project has been removed. diff --git a/releasenotes/notes/remove-combination-alarms-a1a53655f3f7d1d1.yaml b/releasenotes/notes/remove-combination-alarms-a1a53655f3f7d1d1.yaml deleted file mode 100644 index 7101a6b3..00000000 --- a/releasenotes/notes/remove-combination-alarms-a1a53655f3f7d1d1.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -deprecations: - - The deprecated combination alarm support has been removed. diff --git a/releasenotes/notes/remove-eventlet-18ada1cff213af5e.yaml b/releasenotes/notes/remove-eventlet-18ada1cff213af5e.yaml deleted file mode 100644 index dc4bff42..00000000 --- a/releasenotes/notes/remove-eventlet-18ada1cff213af5e.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - > - Remove eventlet from Aodh in favour of a threaded approach diff --git a/releasenotes/notes/remove-no-sql-drivers-21dfdbd750751340.yaml b/releasenotes/notes/remove-no-sql-drivers-21dfdbd750751340.yaml deleted file mode 100644 index 886d9482..00000000 --- a/releasenotes/notes/remove-no-sql-drivers-21dfdbd750751340.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -upgrade: - - All the deprecated non-SQL drivers have been removed. diff --git a/releasenotes/notes/support-combination-to-composite-conversion-3e688a6b7d01a57e.yaml b/releasenotes/notes/support-combination-to-composite-conversion-3e688a6b7d01a57e.yaml deleted file mode 100644 index 9499545d..00000000 --- a/releasenotes/notes/support-combination-to-composite-conversion-3e688a6b7d01a57e.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -upgrade: - - > - Add a tool for converting combination alarms to composite alarms, - since we have deprecated combination alarm support and recommend - using composite alarms to perform alarming on multiple conditions.
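A minimal sketch of the IPC switch described in the queue-communication note above; the literal value ``queue`` is an assumption, as the notes only confirm ``rpc`` as the explicit fallback value::

    [DEFAULT]
    # queue-based communication is the new default IPC protocol; set to
    # rpc to keep the deprecated RPC protocol until its removal after Mitaka
    ipc_protocol = queue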
diff --git a/releasenotes/source/_static/.placeholder b/releasenotes/source/_static/.placeholder deleted file mode 100644 index e69de29b..00000000 diff --git a/releasenotes/source/_templates/.placeholder b/releasenotes/source/_templates/.placeholder deleted file mode 100644 index e69de29b..00000000 diff --git a/releasenotes/source/conf.py b/releasenotes/source/conf.py deleted file mode 100644 index 4251f2e9..00000000 --- a/releasenotes/source/conf.py +++ /dev/null @@ -1,282 +0,0 @@ -# -*- coding: utf-8 -*- -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Aodh Release Notes documentation build configuration file, created by -# sphinx-quickstart on Tue Nov 3 17:40:50 2015. -# -# This file is execfile()d with the current directory set to its -# containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -# sys.path.insert(0, os.path.abspath('.')) - -# -- General configuration ------------------------------------------------ - -# If your documentation needs a minimal Sphinx version, state it here. -# needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - 'openstackdocstheme', - 'reno.sphinxext', -] - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# The suffix of source filenames. -source_suffix = '.rst' - -# The encoding of source files. -# source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -project = u'Aodh Release Notes' -copyright = u'2015, Aodh Developers' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -import pbr.version -aodh_version = pbr.version.VersionInfo('aodh') -# The full version, including alpha/beta/rc tags. -release = aodh_version.version_string_with_vcs() -# The short X.Y version. -version = aodh_version.canonical_version_string() - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. 
-exclude_patterns = [] - -# The reST default role (used for this markup: `text`) to use for all -# documents. -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -# add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -# show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# A list of ignored prefixes for module index sorting. -# modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. -# keep_warnings = False - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = 'openstackdocs' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -# html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -# html_theme_path = [openstackdocstheme.get_html_theme_path()] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -# html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] - -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. These files are copied -# directly to the root of the documentation. -# html_extra_path = [] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -html_last_updated_fmt = '%Y-%m-%d %H:%M' - -# openstackdocstheme options -repository_name = 'openstack/aodh' -bug_project = 'aodh' -bug_tag = '' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} - -# If false, no module index is generated. -# html_domain_indices = True - -# If false, no index is generated. -# html_use_index = True - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -# html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -# html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 
-# html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = None - -# Output file base name for HTML help builder. -htmlhelp_basename = 'AodhReleaseNotesdoc' - - -# -- Options for LaTeX output --------------------------------------------- - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - # 'papersize': 'letterpaper', - - # The font size ('10pt', '11pt' or '12pt'). - # 'pointsize': '10pt', - - # Additional stuff for the LaTeX preamble. - # 'preamble': '', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - ('index', 'AodhReleaseNotes.tex', u'Aodh Release Notes Documentation', - u'Aodh Developers', 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# If true, show page references after internal links. -# latex_show_pagerefs = False - -# If true, show URL addresses after external links. -# latex_show_urls = False - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. -# latex_domain_indices = True - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - ('index', 'aodhreleasenotes', u'Aodh Release Notes Documentation', - [u'Aodh Developers'], 1) -] - -# If true, show URL addresses after external links. -# man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ('index', 'AodhReleaseNotes', u'Aodh Release Notes Documentation', - u'Aodh Developers', 'AodhReleaseNotes', - 'One line description of project.', - 'Miscellaneous'), -] - -# Documents to append as an appendix to all manuals. -# texinfo_appendices = [] - -# If false, no module index is generated. -# texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -# texinfo_show_urls = 'footnote' - -# If true, do not generate a @detailmenu in the "Top" node's menu. -# texinfo_no_detailmenu = False - -# -- Options for Internationalization output ------------------------------ -locale_dirs = ['locale/'] diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst deleted file mode 100644 index 8e324b74..00000000 --- a/releasenotes/source/index.rst +++ /dev/null @@ -1,12 +0,0 @@ -==================== - Aodh Release Notes -==================== - -.. 
toctree:: - :maxdepth: 1 - - unreleased - ocata - newton - mitaka - liberty diff --git a/releasenotes/source/liberty.rst b/releasenotes/source/liberty.rst deleted file mode 100644 index 36217be8..00000000 --- a/releasenotes/source/liberty.rst +++ /dev/null @@ -1,6 +0,0 @@ -============================== - Liberty Series Release Notes -============================== - -.. release-notes:: - :branch: origin/stable/liberty diff --git a/releasenotes/source/locale/de/LC_MESSAGES/releasenotes.po b/releasenotes/source/locale/de/LC_MESSAGES/releasenotes.po deleted file mode 100644 index b7da29b5..00000000 --- a/releasenotes/source/locale/de/LC_MESSAGES/releasenotes.po +++ /dev/null @@ -1,391 +0,0 @@ -# Robert Simai , 2016. #zanata -# Robert Simai , 2017. #zanata -msgid "" -msgstr "" -"Project-Id-Version: Aodh Release Notes 4.0.1\n" -"Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2017-03-10 01:26+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2017-03-09 10:41+0000\n" -"Last-Translator: Robert Simai \n" -"Language-Team: German\n" -"Language: de\n" -"X-Generator: Zanata 3.9.6\n" -"Plural-Forms: nplurals=2; plural=(n != 1)\n" - -msgid "1.1.1" -msgstr "1.1.1" - -msgid "1.1.3" -msgstr "1.1.3" - -msgid "2.0.0" -msgstr "2.0.0" - -msgid "2.0.1" -msgstr "2.0.1" - -msgid "2.0.2" -msgstr "2.0.2" - -msgid "3.0.0" -msgstr "3.0.0" - -msgid "4.0.0" -msgstr "4.0.0" - -msgid "" -"A healthcheck endpoint is provided by default at /healthcheck. It leverages " -"oslo_middleware healthcheck middleware. It allows to retrieve information " -"about the health of the API service." -msgstr "" -"Ein Endpunkt für die Gesundheitsprüfung wird standardmäßig als /healthcheck " -"bereitgestellt. Dies unterstützt oslo_middleware Gesundheitsprüfungs-" -"Middleware. Es erlaubt den Abruf von Informationen über die Gesundheit des " -"API-Dienstes." - -msgid "" -"A new default policy.json is provided to properly handle RBAC control. " -"Existing policy.json files may not grant the appropriate access." -msgstr "" -"Eine neue Standard-policy.json Datei wird ausgeliefert, um die RBAC-" -"Kontrolle richtig zu verarbeiten. Die vorhandene policy.json Dateien " -"erlauben möglicherweise nicht den notwendigen Zugriff." - -msgid "" -"A new option “rest_notifier_ca_bundle_certificate_path” has been added in " -"the configuration file, set None as default value. If this option is present " -"and SSL is used for alarm action the certificate path provided will be used " -"as value of verify parameter in action request." -msgstr "" -"Eine neue Option “rest_notifier_ca_bundle_certificate_path” wurde der " -"Konfigurationsdatei hinzugefügt, mit Keine als Standardwert. Wenn die Option " -"vorhanden ist und SSL für Alarmaktionen verwendet wird, so wird der " -"Zertifikatpfad als Wert verwendet, um Parameter in Aktionsanfragen zu " -"überprüfen." - -msgid "" -"Add a new composite type alarm, which allow users specifying a composite " -"rule to define an alarm with multiple triggering conditions, using a " -"combination of *and*, *or* relationships. The composite rule is composed of " -"multiple threshold rules or gnocchi rules." -msgstr "" -"Ein neuer Composite Alarmtyp wurde hinzugefügt, welcher dem Benutzer " -"erlaubt, zusammengesetzte Regeln mit multiplen Auslösebedingungen zu " -"erstellen. Dazu werden *and* und *or* Beziehungen verwendet. Die Composite-" -"Regel besteht aus multiplen Schwellwertregeln oder Gnocchi-Regeln." 
- -msgid "" -"Add a tool for converting combination alarms to composite alarms, since we " -"have deprecated the combination alarm support and recommend to use composite " -"alarm to perform multiple conditions alarming." -msgstr "" -"Werkzeug hinzugefügt, um Kombinationsalarme in Composite-Alarme zu " -"konvertieren, da die Kombinationsalarmunterstützung abgekündigt wurde. Es " -"wird empfohlen, zur Alarmierung durch multiple Bedingungen Composite-Alarme " -"zu verwenden." - -msgid "" -"Add a tool for migrating alarm and alarm history data from NoSQL storage to " -"SQL storage. The migration tool has been tested OK in devstack environment, " -"but users need to be cautious with this, because the data migration between " -"storage backends is a bit dangerous." -msgstr "" -"Werkzeug hinzugefügt zum migrieren von Alarm- und Alarmverlaufsdaten aus " -"NoSQL in SQL. Das Migrationswerkzeug wurde erfolgreich in der " -"Devstackumgebung gestestet, aber Benutzer sollten dennoch vorsichtig " -"vorgehen. Die Datenmigration zwischen Storage-Backends ist gefährlich." - -msgid "Add support for Keystone v3 authentication" -msgstr "Unterstützung für Keystone v3 Authentifizierung hinzugefügt" - -msgid "" -"Add support for batch processing of messages from queue. This will allow the " -"aodh-listener to grab multiple event messages per thread to enable more " -"efficient processing." -msgstr "" -"Unterstützung zur Stapelverarbeitung von Nachrichten in der Warteschlange " -"hinzugefügt. Dies erlaubt dem aodh-listener mehrere Nachrichten pro Thread " -"aufzunehmen und somit eine effizientere Bearbeitung." - -msgid "" -"Add support for batch processing of messages from queue. This will allow the " -"aodh-notifier to grab multiple messages per thread to enable more efficient " -"processing." -msgstr "" -"Unterstützung zur Stapelverarbeitung von Nachrichten in der Warteschlange " -"hinzugefügt. Dies erlaubt dem aodh-notifier mehrere Nachrichten pro Thread " -"aufzunehmen und somit eine effizientere Bearbeitung." - -msgid "Alarm name unique constraint for each project has been removed." -msgstr "" -"Notwendigkeit eines einmaligen Alarmnamens für jedes Projekt wurde entfernt." - -msgid "All the deprecated non-SQL drivers have been removed." -msgstr "Alle veralteten nicht-SQL Treiber wurden entfernt." - -msgid "" -"Allow to extends the alarm evaluation windows to compensate the reporting/" -"ingestion lag. An new option is introduced additional_ingestion_lag " -"defaulted to 0. It represents the number of seconds of the window extension." -msgstr "" -"Es ist erlaubt, das Alarmevaluierungsfenster zu erweitern, um die " -"Verzögerung zwischen Report und Aufnahme zu kompensieren. Eine neue Option " -"additional_ingestion_lag wurde eingeführt, mit Standard 0. Sie bezeichnet " -"die Anzahl an Sekunden der Fenstererweiterung." - -msgid "" -"Allow to extends the alarm evaluation windows to to compensate the reporting/" -"ingestion lag. An new option is introduced additional_ingestion_lag " -"defaulted to 0. It represents the number of seconds of the window extension." -msgstr "" -"Es ist erlaubt, das Alarmevaluierungsfenster zu erweitern, um die " -"Verzögerung zwischen Report und Aufnahme zu kompensieren. Eine neue Option " -"additional_ingestion_lag wurde eingeführt, mit Standard 0. Sie bezeichnet " -"die Anzahl an Sekunden der Fenstererweiterung." 
- -msgid "Aodh Release Notes" -msgstr "Aodh Releasenotes" - -msgid "" -"Aodh now leverages microseconds timestamps available since MySQL 5.6.4, " -"meaning it is now the minimum required version of MySQL." -msgstr "" -"Aodh verwendet jetzt Mikrosekunden Zeitstempel, die seit MySQL 5.6.4 " -"verfügbar sind. Dies ist somit die minimale erforderliche MySQL Version." - -msgid "" -"Because queues provide the equivalent functionality. RPC support is " -"deprecated and will be removed after Mitaka." -msgstr "" -"RPC Unterstützung ist veraltet und wird nach Mitaka entfernt, da " -"Warteschlangen (Queues) die gleiche Funktionalität bieten." - -msgid "Bug Fixes" -msgstr "Fehlerkorrekturen" - -msgid "Current Series Release Notes" -msgstr "Aktuelle Serie Releasenotes" - -msgid "Deprecation Notes" -msgstr "Ablaufwarnungen" - -msgid "" -"Drop support for NoSQL backends in Aodh. SQL is a prefectly sufficient " -"backend for handling the scope of alarms. To maximise available resources, " -"NoSQL backends are deprecated so developers do not need to worry about " -"adding features to multiple backends." -msgstr "" -"Unterstützung für NoSQL Backends in Aodh entfällt. SQL ist ein vollkommen " -"ausreichendes Backend um Alarme zu handhaben. Zum maximieren vorhandener " -"Ressourcen werden NoSQL Backends als veraltet aussortiert, so dass sich " -"Entwickler keine Gedanken beim hinzufügen neuer Funktionen für multiple " -"Backends machen müssen." - -msgid "" -"Enable aodh services, including aodh-evaluator, aodh-listener and aodh-" -"notifier to run in multiple worker mode. New options are introduced " -"corresponsively as [evaluator]workers, [listener]workers and " -"[notifier]workers. They all default to 1." -msgstr "" -"Aodh Diesnte aktiviert, einschließlich aodh-evaluator, aodh-listener und " -"aodh-notifier, damit diese in multiplen Worker-Modi laufen. Neue Optionen " -"wurden hinzugefügt, für [evaluator]Worker, [listener]Worker und " -"[notifier]Workers. Standardmäßig sind sie auf 1 gesetzt." - -msgid "" -"Gnocchi aggregation capabilities are now cached to minimise redundant calls " -"to Gnocchi when validating aggregation methods. The cache is stored in-" -"memory for an hour. If additional aggregations are added to Gnocchi, they " -"will not be proprogated to Aodh's API service for at most an hour or unless " -"the service is restarted." -msgstr "" -"Die Kumulierungsfähigkeiten in Gnocchi haben jetzt einen Cache, so dass " -"redundante Aufrufe an Gnocchi bei der Prüfung von Kumulierungsmethoden " -"minimiert werden. Der Cache wird für eine Stunde im Speicher gehalten. Zu " -"Gnocchi hinzugefügte Kumulierungen werden für maximal eine Stunde nicht an " -"Aodhs API-Dienst übergeben, außer der Dienst wird neu gestartet." - -msgid "Gnocchi dispatcher now uses client rather than direct http requests" -msgstr "" -"Der Gnocchi Dispatcher verwendet den Klienten und nicht mehr direkte HTTP-" -"Anfragen" - -msgid "Liberty Series Release Notes" -msgstr "Liberty Serie Releasenotes" - -msgid "Mitaka Series Release Notes" -msgstr "Mitaka Serie Releasenotes" - -msgid "New Features" -msgstr "Neue Funktionen" - -msgid "Newton Series Release Notes" -msgstr "Newton Serie Releasenotes" - -msgid "Ocata Series Release Notes" -msgstr "Ocata Serie Releasenotes" - -msgid "Other Notes" -msgstr "Andere Notizen" - -msgid "" -"Patch was added to address inconsistent RBAC policy handling. Certain rules " -"may not have been given appropriate access." 
-msgstr "" -"Ein Patch wurde hinzugefügt, um die inkonsistente Behandlung von RBAC-" -"Richtlinien zu korrigieren. Bestimmte Regeln konnten nicht den vorgesehenen " -"Zugriff erhalten." - -msgid "" -"Queue based communication is the new default IPC protocol. RPC can still be " -"used by choosing rpc as ipc_protocol option. Only one protocol can be run at " -"any given time." -msgstr "" -"Warteschlangenbasierte Kommunikation ist das neue Standard-IPC-Protokoll. " -"RPC kann immer noch verwendet werden, in dem RPC als ipc_protocol Option " -"ausgewählt wird. Es kann immer nur ein Protokoll abgearbeitet werden." - -msgid "Remove eventlet from Aodh in favour of threaded approach" -msgstr "Eventlet aus Aodh entfernt, der Thread-Ansatz wird bevorzugt" - -msgid "Security Issues" -msgstr "Sicherheitsrelevante Probleme" - -msgid "Start using reno to manage release notes." -msgstr "Reno wird für die Verwaltung der Releasenotes verwendet." - -msgid "" -"Support for queue based communication between alarm evaluator service and " -"alarm notifier services was added. Original implementation involved using " -"RPC but there is significant overhead involved with using RPC. Work queues " -"provided required functionality with better performance." -msgstr "" -"Unterstützung für warteschleifenbasierte Kommunikation zwischen dem " -"Alarmevaluierungsdienst und dem Alarmbenachrichtigungsdienst wurde " -"hinzugefügt. Die Originalimplementierung verwendete RPC, was aber einen " -"bedeutenden Mehraufwand bedeutet. Arbeitswarteschlangen stellen die " -"notwendige Funktionalität bereit und bieten bessere Performanz." - -msgid "The api.pecan_debug option has been removed." -msgstr "Die api.pecan_debug Option wurde entfernt." - -msgid "" -"The combination alarms are officially deprecated and disabled by default. " -"Set api.enable_combination_alarms to True to enable them. Existing alarms " -"will still be evaluated, but access to them via the API is linked to whether " -"that configuration option is turned on or off. It's advised to use composite " -"alarms instead." -msgstr "" -"Kombinationsalarme gelten offiziell als veraltet und sind per Standard " -"deaktiviert. Setzen Sie api.enable_combination_alarms auf Wahr, um sie zu " -"aktivieren. Bereits vorhandene Alarme werden weiterhin evaluiert, aber die " -"Bearbeitung über die API hängt von der Konfigurationsoption ab. Es wird " -"empfohlen, Composite Alarme zu verwenden." - -msgid "The deprecated combination alarms support have been removed." -msgstr "Die veralteten Kombinationsalarme wurden entfernt." - -msgid "Upgrade Notes" -msgstr "Aktualisierungsnotizen" - -msgid "" -"[`bug 1504495 `_] Patch " -"was added to address inconsistent RBAC policy handling. Certain rules may " -"not have been given appropriate access." -msgstr "" -"[`bug 1504495 `_] Ein " -"Patch wurde hinzugefügt, um die inkonsistente RBAC-Richtlinienbehandlung zu " -"korrigieren. Einige Regeln erhielten nicht den notwendigen Zugriff." - -msgid "" -"[`bug 1511252 `_] Fix an issue " -"with combination alarms where it fails to evaluate all issues in the chain " -"of alarms." -msgstr "" -"[`bug 1511252 `_] Problem " -"beseitigt bei der Kombination von Alarmen, wobei nicht alle Ereignisse in " -"der Alarmkette beachtet wurden." - -msgid "" -"[`bug 1513738 `_] Fix an issue " -"where alarms using Gnocchi aggregations are not being evaluated." -msgstr "" -"[`bug 1513738 `_] Problem " -"beseitigt, bei dem Alarme, die Gnocchi aggregations verwenden, nicht " -"evaluiert wurden." 
- -msgid "" -"[`bug 1518447 `_] Fix to " -"ensure ceilometerclient is properly initialised on startup." -msgstr "" -"[`bug 1518447 `_] " -"Fehlerkorrektur um sicherzustellen, dass ceilometerclient beim starten " -"richtig initialisiert wird." - -msgid "" -"[`bug 1539069 `_] Fix to " -"handle scenario where no valid statistics exist for specified period." -msgstr "" -"[`bug 1539069 `_] " -"Fehlerkorrektur für die Handhabung von Szenarien, in denen keine gültigen " -"Statistiken für den angegeben Zeitraum existieren." - -msgid "" -"[`bug 1540395 `_] Fix reason " -"string to properly handle transitions when one sample is outside of defined " -"threshold." -msgstr "" -"[`bug 1540395 `_] " -"Fehlerkorrektur für die Begründungszeichenkette, um Übergänge zwischen " -"innerhalb und außerhalb von Schwellwerten richtig zu behandeln." - -msgid "" -"[`bug 1575530 `_] Patch was " -"added to fix and improve the partition coordinator, make sure the input " -"tasks can be correctly distributed to partition members." -msgstr "" -"[`bug 1575530 `_] Ein Patch " -"wurde hinzugefügt, um den Partition Coordinator zu verbessern und um " -"sicherzustellen, dass eingegebene Aufgaben richtig an Partitionsmitglieder " -"verteilt werden." - -msgid "" -"[`bug 1582131 `_] Fix an issue " -"with adding CA_BUNDLE certificate parth as value of \"verify\" parameter in " -"SSL requests." -msgstr "" -"[`bug 1582131 `_] Problem " -"beseitigt beim hinzufügen des CA_BUNDLE Zertifikatpfades als Wert für den " -"\"verify\" Parameter in SSL Anfragen." - -msgid "" -"batch_size and batch_timeout configuration options are added to [listener] " -"section of configuration. The batch_size controls the number of messages to " -"grab before processing. Similarly, the batch_timeout defines the wait time " -"before processing." -msgstr "" -"batch_size und batch_timeout Konfigurationsoptionen wurden dem [listener]-" -"Abschnitt der Konfiguration hinzugefügt. batch_size bestimmt die Anzahl der " -"Nachrichten, die vor dem Start der Bearbeitung aufgenommen werden. In " -"gleicher Weise bestimmt batch_timeout die Wartezeit, bevor die Bearbeitung " -"beginnt." - -msgid "" -"batch_size and batch_timeout configuration options are added to [notifier] " -"section of configuration. The batch_size controls the number of messages to " -"grab before processing. Similarly, the batch_timeout defines the wait time " -"before processing." -msgstr "" -"batch_size und batch_timeout Konfigurationsoptionen wurden dem [notifier]-" -"Abschnitt der Konfiguration hinzugefügt. batch_size bestimmt die Anzahl der " -"Nachrichten, die vor dem Start der Bearbeitung aufgenommen werden. In " -"gleicher Weise bestimmt batch_timeout die Wartezeit, bevor die Bearbeitung " -"beginnt." - -msgid "gnocchiclient library is now a requirement if using ceilometer+gnocchi." -msgstr "" -"Die gnocchiclient Bibliothek ist jetzt notwendig, wenn ceilometer und " -"gnocchi verwendet werden." diff --git a/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po b/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po deleted file mode 100644 index b772ffee..00000000 --- a/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po +++ /dev/null @@ -1,413 +0,0 @@ -# Andi Chandler , 2016. #zanata -# Andi Chandler , 2017. 
#zanata -msgid "" -msgstr "" -"Project-Id-Version: Aodh Release Notes 4.0.1\n" -"Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2017-07-13 18:01+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2017-07-11 04:32+0000\n" -"Last-Translator: Andi Chandler \n" -"Language-Team: English (United Kingdom)\n" -"Language: en-GB\n" -"X-Generator: Zanata 3.9.6\n" -"Plural-Forms: nplurals=2; plural=(n != 1)\n" - -msgid "1.1.1" -msgstr "1.1.1" - -msgid "1.1.3" -msgstr "1.1.3" - -msgid "2.0.0" -msgstr "2.0.0" - -msgid "2.0.1" -msgstr "2.0.1" - -msgid "2.0.2" -msgstr "2.0.2" - -msgid "3.0.0" -msgstr "3.0.0" - -msgid "4.0.0" -msgstr "4.0.0" - -msgid "" -"A healthcheck endpoint is provided by default at /healthcheck. It leverages " -"oslo_middleware healthcheck middleware. It allows to retrieve information " -"about the health of the API service." -msgstr "" -"A healthcheck endpoint is provided by default at /healthcheck. It leverages " -"oslo_middleware healthcheck middleware. It allows you to retrieve " -"information about the health of the API service." - -msgid "" -"A new default policy.json is provided to properly handle RBAC control. " -"Existing policy.json files may not grant the appropriate access." -msgstr "" -"A new default policy.json is provided to properly handle RBAC control. " -"Existing policy.json files may not grant the appropriate access." - -msgid "" -"A new option “rest_notifier_ca_bundle_certificate_path” has been added in " -"the configuration file, set None as default value. If this option is present " -"and SSL is used for alarm action the certificate path provided will be used " -"as value of verify parameter in action request." -msgstr "" -"A new option “rest_notifier_ca_bundle_certificate_path” has been added in " -"the configuration file, set None as default value. If this option is present " -"and SSL is used for alarm action the certificate path provided will be used " -"as value of verify parameter in action request." - -msgid "" -"Add a new composite type alarm, which allow users specifying a composite " -"rule to define an alarm with multiple triggering conditions, using a " -"combination of *and*, *or* relationships. The composite rule is composed of " -"multiple threshold rules or gnocchi rules." -msgstr "" -"Add a new composite type alarm, which allow users specifying a composite " -"rule to define an alarm with multiple triggering conditions, using a " -"combination of *and*, *or* relationships. The composite rule is composed of " -"multiple threshold rules or Gnocchi rules." - -msgid "" -"Add a tool for converting combination alarms to composite alarms, since we " -"have deprecated the combination alarm support and recommend to use composite " -"alarm to perform multiple conditions alarming." -msgstr "" -"Add a tool for converting combination alarms to composite alarms, since we " -"have deprecated the combination alarm support and recommend to use composite " -"alarm to perform multiple conditions alarming." - -msgid "" -"Add a tool for migrating alarm and alarm history data from NoSQL storage to " -"SQL storage. The migration tool has been tested OK in devstack environment, " -"but users need to be cautious with this, because the data migration between " -"storage backends is a bit dangerous." -msgstr "" -"Add a tool for migrating alarm and alarm history data from NoSQL storage to " -"SQL storage. 
The migration tool has been tested OK in Devstack environment, " -"but users need to be cautious with this, because the data migration between " -"storage backends is a bit dangerous." - -msgid "Add support for Keystone v3 authentication" -msgstr "Add support for Keystone v3 authentication" - -msgid "" -"Add support for batch processing of messages from queue. This will allow the " -"aodh-listener to grab multiple event messages per thread to enable more " -"efficient processing." -msgstr "" -"Add support for batch processing of messages from queue. This will allow the " -"aodh-listener to grab multiple event messages per thread to enable more " -"efficient processing." - -msgid "" -"Add support for batch processing of messages from queue. This will allow the " -"aodh-notifier to grab multiple messages per thread to enable more efficient " -"processing." -msgstr "" -"Add support for batch processing of messages from queue. This will allow the " -"aodh-notifier to grab multiple messages per thread to enable more efficient " -"processing." - -msgid "Alarm name unique constraint for each project has been removed." -msgstr "Alarm name unique constraint for each project has been removed." - -msgid "All the deprecated non-SQL drivers have been removed." -msgstr "All the deprecated non-SQL drivers have been removed." - -msgid "" -"Allow to extends the alarm evaluation windows to compensate the reporting/" -"ingestion lag. An new option is introduced additional_ingestion_lag " -"defaulted to 0. It represents the number of seconds of the window extension." -msgstr "" -"Allow you to extends the alarm evaluation windows to compensate the " -"reporting/ingestion lag. An new option is introduced " -"additional_ingestion_lag defaulted to 0. It represents the number of seconds " -"of the window extension." - -msgid "" -"Allow to extends the alarm evaluation windows to to compensate the reporting/" -"ingestion lag. An new option is introduced additional_ingestion_lag " -"defaulted to 0. It represents the number of seconds of the window extension." -msgstr "" -"Allow to extends the alarm evaluation windows to to compensate the reporting/" -"ingestion lag. An new option is introduced additional_ingestion_lag " -"defaulted to 0. It represents the number of seconds of the window extension." - -msgid "Aodh Release Notes" -msgstr "Aodh Release Notes" - -msgid "" -"Aodh now leverages microseconds timestamps available since MySQL 5.6.4, " -"meaning it is now the minimum required version of MySQL." -msgstr "" -"Aodh now leverages microseconds timestamps available since MySQL 5.6.4, " -"meaning it is now the minimum required version of MySQL." - -msgid "" -"Because queues provide the equivalent functionality. RPC support is " -"deprecated and will be removed after Mitaka." -msgstr "" -"Because queues provide the equivalent functionality. RPC support is " -"deprecated and will be removed after Mitaka." - -msgid "Bug Fixes" -msgstr "Bug Fixes" - -msgid "" -"Ceilometer's API is deprecated in Ocata. Therefore, threshold alarms are now " -"deprecated as well. Threshold rules will be removed when Ceilometer's API is " -"also removed. Similar functionality is provided through Gnocchi alarm rules: " -"``gnocchi_resources_threshold``, " -"``gnocchi_aggregation_by_metrics_threshold``, or " -"``gnocchi_aggregation_by_resources_threshold``." -msgstr "" -"Ceilometer's API is deprecated in Ocata. Therefore, threshold alarms are now " -"deprecated as well. Threshold rules will be removed when Ceilometer's API is " -"also removed. 
Similar functionality is provided through Gnocchi alarm rules: " -"``gnocchi_resources_threshold``, " -"``gnocchi_aggregation_by_metrics_threshold``, or " -"``gnocchi_aggregation_by_resources_threshold``." - -msgid "Current Series Release Notes" -msgstr "Current Series Release Notes" - -msgid "Deprecation Notes" -msgstr "Deprecation Notes" - -msgid "" -"Drop support for NoSQL backends in Aodh. SQL is a prefectly sufficient " -"backend for handling the scope of alarms. To maximise available resources, " -"NoSQL backends are deprecated so developers do not need to worry about " -"adding features to multiple backends." -msgstr "" -"Drop support for NoSQL backends in Aodh. SQL is a perfectly sufficient " -"backend for handling the scope of alarms. To maximise available resources, " -"NoSQL backends are deprecated so developers do not need to worry about " -"adding features to multiple backends." - -msgid "" -"Enable aodh services, including aodh-evaluator, aodh-listener and aodh-" -"notifier to run in multiple worker mode. New options are introduced " -"corresponsively as [evaluator]workers, [listener]workers and " -"[notifier]workers. They all default to 1." -msgstr "" -"Enable aodh services, including aodh-evaluator, aodh-listener and aodh-" -"notifier to run in multiple worker mode. New options are introduced " -"correspondingly as [evaluator]workers, [listener]workers and " -"[notifier]workers. They all default to 1." - -msgid "" -"Gnocchi aggregation capabilities are now cached to minimise redundant calls " -"to Gnocchi when validating aggregation methods. The cache is stored in-" -"memory for an hour. If additional aggregations are added to Gnocchi, they " -"will not be proprogated to Aodh's API service for at most an hour or unless " -"the service is restarted." -msgstr "" -"Gnocchi aggregation capabilities are now cached to minimise redundant calls " -"to Gnocchi when validating aggregation methods. The cache is stored in-" -"memory for an hour. If additional aggregations are added to Gnocchi, they " -"will not be proprogated to Aodh's API service for at most an hour or unless " -"the service is restarted." - -msgid "Gnocchi dispatcher now uses client rather than direct http requests" -msgstr "Gnocchi dispatcher now uses client rather than direct http requests" - -msgid "Liberty Series Release Notes" -msgstr "Liberty Series Release Notes" - -msgid "Mitaka Series Release Notes" -msgstr "Mitaka Series Release Notes" - -msgid "New Features" -msgstr "New Features" - -msgid "Newton Series Release Notes" -msgstr "Newton Series Release Notes" - -msgid "Ocata Series Release Notes" -msgstr "Ocata Series Release Notes" - -msgid "Other Notes" -msgstr "Other Notes" - -msgid "" -"Patch was added to address inconsistent RBAC policy handling. Certain rules " -"may not have been given appropriate access." -msgstr "" -"Patch was added to address inconsistent RBAC policy handling. Certain rules " -"may not have been given appropriate access." - -msgid "" -"Queue based communication is the new default IPC protocol. RPC can still be " -"used by choosing rpc as ipc_protocol option. Only one protocol can be run at " -"any given time." -msgstr "" -"Queue based communication is the new default IPC protocol. RPC can still be " -"used by choosing rpc as ipc_protocol option. Only one protocol can be run at " -"any given time." 
- -msgid "Remove eventlet from Aodh in favour of threaded approach" -msgstr "Remove eventlet from Aodh in favour of threaded approach" - -msgid "Security Issues" -msgstr "Security Issues" - -msgid "Start using reno to manage release notes." -msgstr "Start using reno to manage release notes." - -msgid "" -"Support for queue based communication between alarm evaluator service and " -"alarm notifier services was added. Original implementation involved using " -"RPC but there is significant overhead involved with using RPC. Work queues " -"provided required functionality with better performance." -msgstr "" -"Support for queue based communication between alarm evaluator service and " -"alarm notifier services was added. Original implementation involved using " -"RPC but there is significant overhead involved with using RPC. Work queues " -"provided required functionality with better performance." - -msgid "The api.pecan_debug option has been removed." -msgstr "The api.pecan_debug option has been removed." - -msgid "" -"The combination alarms are officially deprecated and disabled by default. " -"Set api.enable_combination_alarms to True to enable them. Existing alarms " -"will still be evaluated, but access to them via the API is linked to whether " -"that configuration option is turned on or off. It's advised to use composite " -"alarms instead." -msgstr "" -"The combination alarms are officially deprecated and disabled by default. " -"Set api.enable_combination_alarms to True to enable them. Existing alarms " -"will still be evaluated, but access to them via the API is linked to whether " -"that configuration option is turned on or off. It's advised to use composite " -"alarms instead." - -msgid "The deprecated combination alarms support have been removed." -msgstr "The deprecated combination alarms support have been removed." - -msgid "" -"The reason of the state change is now part of the API as \"state_reason\" " -"field of the alarm object." -msgstr "" -"The reason of the state change is now part of the API as \"state_reason\" " -"field of the alarm object." - -msgid "Upgrade Notes" -msgstr "Upgrade Notes" - -msgid "" -"When an unprivileged user want to access to Gnocchi resources created by " -"Ceilometer, that doesn't work because the filter scope the Gnocchi query to " -"resource owner to the user. To fix we introduce a new configuration option " -"\"gnocchi_external_project_owner\" set by default to \"service\". The new " -"filter now allow two kind of Gnocchi resources: * owned by the user project " -"* owned by \"gnocchi_external_project_owner\" and the orignal project_id of" -msgstr "" -"When an unprivileged user want to access to Gnocchi resources created by " -"Ceilometer, that doesn't work because the filter scope the Gnocchi query to " -"resource owner to the user. To fix we introduce a new configuration option " -"\"gnocchi_external_project_owner\" set by default to \"service\". The new " -"filter now allow two kind of Gnocchi resources: * owned by the user project " -"* owned by \"gnocchi_external_project_owner\" and the orignal project_id of" - -msgid "" -"[`bug 1504495 `_] Patch " -"was added to address inconsistent RBAC policy handling. Certain rules may " -"not have been given appropriate access." -msgstr "" -"[`bug 1504495 `_] Patch " -"was added to address inconsistent RBAC policy handling. Certain rules may " -"not have been given appropriate access." 
- -msgid "" -"[`bug 1511252 `_] Fix an issue " -"with combination alarms where it fails to evaluate all issues in the chain " -"of alarms." -msgstr "" -"[`bug 1511252 `_] Fix an issue " -"with combination alarms where it fails to evaluate all issues in the chain " -"of alarms." - -msgid "" -"[`bug 1513738 `_] Fix an issue " -"where alarms using Gnocchi aggregations are not being evaluated." -msgstr "" -"[`bug 1513738 `_] Fix an issue " -"where alarms using Gnocchi aggregations are not being evaluated." - -msgid "" -"[`bug 1518447 `_] Fix to " -"ensure ceilometerclient is properly initialised on startup." -msgstr "" -"[`bug 1518447 `_] Fix to " -"ensure Ceilometer client is properly initialised on startup." - -msgid "" -"[`bug 1539069 `_] Fix to " -"handle scenario where no valid statistics exist for specified period." -msgstr "" -"[`bug 1539069 `_] Fix to " -"handle scenario where no valid statistics exist for specified period." - -msgid "" -"[`bug 1540395 `_] Fix reason " -"string to properly handle transitions when one sample is outside of defined " -"threshold." -msgstr "" -"[`bug 1540395 `_] Fix reason " -"string to properly handle transitions when one sample is outside of defined " -"threshold." - -msgid "" -"[`bug 1575530 `_] Patch was " -"added to fix and improve the partition coordinator, make sure the input " -"tasks can be correctly distributed to partition members." -msgstr "" -"[`bug 1575530 `_] Patch was " -"added to fix and improve the partition coordinator, make sure the input " -"tasks can be correctly distributed to partition members." - -msgid "" -"[`bug 1582131 `_] Fix an issue " -"with adding CA_BUNDLE certificate parth as value of \"verify\" parameter in " -"SSL requests." -msgstr "" -"[`bug 1582131 `_] Fix an issue " -"with adding CA_BUNDLE certificate path as value of \"verify\" parameter in " -"SSL requests." - -msgid "" -"batch_size and batch_timeout configuration options are added to [listener] " -"section of configuration. The batch_size controls the number of messages to " -"grab before processing. Similarly, the batch_timeout defines the wait time " -"before processing." -msgstr "" -"batch_size and batch_timeout configuration options are added to [listener] " -"section of configuration. The batch_size controls the number of messages to " -"grab before processing. Similarly, the batch_timeout defines the wait time " -"before processing." - -msgid "" -"batch_size and batch_timeout configuration options are added to [notifier] " -"section of configuration. The batch_size controls the number of messages to " -"grab before processing. Similarly, the batch_timeout defines the wait time " -"before processing." -msgstr "" -"batch_size and batch_timeout configuration options are added to [notifier] " -"section of configuration. The batch_size controls the number of messages to " -"grab before processing. Similarly, the batch_timeout defines the wait time " -"before processing." - -msgid "gnocchiclient library is now a requirement if using ceilometer+gnocchi." -msgstr "" -"gnocchiclient library is now a requirement if using ceilometer+gnocchi." - -msgid "the resource is the user project." -msgstr "the resource is the user project." diff --git a/releasenotes/source/locale/ja/LC_MESSAGES/releasenotes.po b/releasenotes/source/locale/ja/LC_MESSAGES/releasenotes.po deleted file mode 100644 index 43ee1be4..00000000 --- a/releasenotes/source/locale/ja/LC_MESSAGES/releasenotes.po +++ /dev/null @@ -1,366 +0,0 @@ -# Akihiro Motoki , 2016. #zanata -# Shu Muto , 2017. 
#zanata -msgid "" -msgstr "" -"Project-Id-Version: Aodh Release Notes 4.0.1\n" -"Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2017-07-11 05:32+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2017-03-06 08:18+0000\n" -"Last-Translator: Shu Muto \n" -"Language-Team: Japanese\n" -"Language: ja\n" -"X-Generator: Zanata 3.9.6\n" -"Plural-Forms: nplurals=1; plural=0\n" - -msgid "1.1.1" -msgstr "1.1.1" - -msgid "2.0.0" -msgstr "2.0.0" - -msgid "2.0.1" -msgstr "2.0.1" - -msgid "2.0.2" -msgstr "2.0.2" - -msgid "3.0.0" -msgstr "3.0.0" - -msgid "4.0.0" -msgstr "4.0.0" - -msgid "" -"A healthcheck endpoint is provided by default at /healthcheck. It leverages " -"oslo_middleware healthcheck middleware. It allows to retrieve information " -"about the health of the API service." -msgstr "" -"ヘルスチェックのエンドポイントは、デフォルトで /healthcheck に用意されていま" -"す。 oslo_middleware の healthcheck ミドルウェアを活用しています。 API サービ" -"スの正常性に関する情報を取得することができます。" - -msgid "" -"A new default policy.json is provided to properly handle RBAC control. " -"Existing policy.json files may not grant the appropriate access." -msgstr "" -"新しいデフォルトの policy.json が RBAC 制御を適切に処理するために提供されてい" -"ます。既存の policy.json ファイルが適切なアクセスを許可していない可能性があり" -"ます。" - -msgid "" -"A new option “rest_notifier_ca_bundle_certificate_path” has been added in " -"the configuration file, set None as default value. If this option is present " -"and SSL is used for alarm action the certificate path provided will be used " -"as value of verify parameter in action request." -msgstr "" -"新しいオプション \"rest_notifier_ca_bundle_certificate_path\" が設定ファイル" -"に追加されました。デフォルト値として None が設定されています。このオプション" -"が存在し、アラームアクションに SSL が使用されている場合、提供された証明書パス" -"はアクション要求の検証パラメータの値として使用されます。" - -msgid "" -"Add a new composite type alarm, which allow users specifying a composite " -"rule to define an alarm with multiple triggering conditions, using a " -"combination of *and*, *or* relationships. The composite rule is composed of " -"multiple threshold rules or gnocchi rules." -msgstr "" -"新しい複合タイプのアラームを追加します。複合ルールを指定すると、 *and* と " -"*or* の関係を組み合わせて複数のトリガー条件を持つアラームを定義できます。複合" -"ルールは、複数のしきい値ルールや gnocchi ルールで構成されます。" - -msgid "" -"Add a tool for converting combination alarms to composite alarms, since we " -"have deprecated the combination alarm support and recommend to use composite " -"alarm to perform multiple conditions alarming." -msgstr "" -"コンビネーションアラームのサポートを非推奨とし、複合アラームを使用して複数の" -"状態のアラームを挙げることを推奨しているため、コンビネーションアラームを複合" -"アラームに変換するためのツールを追加しました。" - -msgid "" -"Add a tool for migrating alarm and alarm history data from NoSQL storage to " -"SQL storage. The migration tool has been tested OK in devstack environment, " -"but users need to be cautious with this, because the data migration between " -"storage backends is a bit dangerous." -msgstr "" -"NoSQL ストレージから SQL ストレージにアラームおよびアラーム履歴データを移行す" -"るためのツールを追加しました。移行ツールは devstack 環境で正常にテストされて" -"いますが、ストレージバックエンド間のデータ移行は少し危険なので、ユーザーはこ" -"れに注意する必要があります。" - -msgid "Add support for Keystone v3 authentication" -msgstr "Keystone v3 認証のサポートを追加しました。" - -msgid "" -"Add support for batch processing of messages from queue. This will allow the " -"aodh-listener to grab multiple event messages per thread to enable more " -"efficient processing." -msgstr "" -"キューからのメッセージのバッチ処理をサポートしました。これにより、 aodh-" -"listener はスレッドごとに複数のメッセージを取得して、より効率的に処理します。" - -msgid "" -"Add support for batch processing of messages from queue. 
This will allow the " -"aodh-notifier to grab multiple messages per thread to enable more efficient " -"processing." -msgstr "" -"キューからのメッセージのバッチ処理をサポートしました。これにより、 aodh-" -"notifier はスレッドごとに複数のメッセージを取得して、より効率的に処理します。" - -msgid "Alarm name unique constraint for each project has been removed." -msgstr "プロジェクト毎のアラーム名の一意成約を削除しました。" - -msgid "All the deprecated non-SQL drivers have been removed." -msgstr "すべての非推奨の non-SQL ドライバーを削除しました。" - -msgid "" -"Allow to extends the alarm evaluation windows to compensate the reporting/" -"ingestion lag. An new option is introduced additional_ingestion_lag " -"defaulted to 0. It represents the number of seconds of the window extension." -msgstr "" -"報告や取得の遅れを補うためにアラーム評価のウィンドウを拡張することができま" -"す。新しいオプション additional_ingestion_lag が導入され、デフォルトは 0 で" -"す。これは、ウィンドウの拡張を秒数で表します。" - -msgid "" -"Allow to extends the alarm evaluation windows to to compensate the reporting/" -"ingestion lag. An new option is introduced additional_ingestion_lag " -"defaulted to 0. It represents the number of seconds of the window extension." -msgstr "" -"アラーム評価ウィンドウを拡張して、レポート/取り込みの遅延を補うことができま" -"す。新しいオプション additional_ingestion_lag が導入されました。デフォルトは " -"0 です。これは、ウィンドウ拡張の秒数を表します。" - -msgid "Aodh Release Notes" -msgstr "Aodh リリースノート" - -msgid "" -"Aodh now leverages microseconds timestamps available since MySQL 5.6.4, " -"meaning it is now the minimum required version of MySQL." -msgstr "" -"Aodh は現在、MySQL 5.6.4 から利用可能なマイクロ秒のタイムスタンプを利用してい" -"ます。つまり、それが MySQL の最低限必要なバージョンです。" - -msgid "" -"Because queues provide the equivalent functionality. RPC support is " -"deprecated and will be removed after Mitaka." -msgstr "" -"キューが同等の機能を提供するため、 RPC サポートは非推奨となり、 Mitaka の後に" -"削除されます。" - -msgid "Bug Fixes" -msgstr "バグ修正" - -msgid "Current Series Release Notes" -msgstr "開発中バージョンのリリースノート" - -msgid "Deprecation Notes" -msgstr "廃止予定の機能" - -msgid "" -"Drop support for NoSQL backends in Aodh. SQL is a prefectly sufficient " -"backend for handling the scope of alarms. To maximise available resources, " -"NoSQL backends are deprecated so developers do not need to worry about " -"adding features to multiple backends." -msgstr "" -"Aodh での NoSQL バックエンドのサポートを中止します。 SQL は、アラームのスコー" -"プを処理するのに十分なバックエンドです。利用可能なリソースを最大限に活用する" -"ため、 NoSQL バックエンドは非推奨としました。開発者は複数のバックエンドに機能" -"を追加する必要はありません。" - -msgid "" -"Enable aodh services, including aodh-evaluator, aodh-listener and aodh-" -"notifier to run in multiple worker mode. New options are introduced " -"corresponsively as [evaluator]workers, [listener]workers and " -"[notifier]workers. They all default to 1." -msgstr "" -"aodh-evaluator 、 aodh-listener 、および aodh-notifier を含む aodh サービスを" -"複数のワーカーモードで実行できるようにしました。新しいオプションは、 " -"[evaluator] workers 、[listener] workers 、[notifier] workers として対応して" -"導入されています。 それらのデフォルトはすべて 1 です。" - -msgid "" -"Gnocchi aggregation capabilities are now cached to minimise redundant calls " -"to Gnocchi when validating aggregation methods. The cache is stored in-" -"memory for an hour. If additional aggregations are added to Gnocchi, they " -"will not be proprogated to Aodh's API service for at most an hour or unless " -"the service is restarted." 
-msgstr "" -"Gnocchi 集計機能は、集計メソッドの検証時に Gnocchi への冗長な呼び出しを最小限" -"に抑えるようキャッシュされるようになりました。キャッシュはメモリに1時間格納" -"されます。追加の集計が Gnocchi に追加された場合、最大1時間、またはサービスが" -"再起動されない限り、Aodh の API サービスに伝播されることはありません。" - -msgid "Gnocchi dispatcher now uses client rather than direct http requests" -msgstr "" -"Gnocchi のディスパッチャーは、直接 http リクエストを使わず、クライアントを使" -"用するようになりました。" - -msgid "Liberty Series Release Notes" -msgstr "Liberty バージョンのリリースノート" - -msgid "Mitaka Series Release Notes" -msgstr "Mitaka バージョンのリリースノート" - -msgid "New Features" -msgstr "新機能" - -msgid "Newton Series Release Notes" -msgstr "Newton バージョンのリリースノート" - -msgid "Ocata Series Release Notes" -msgstr "Ocata バージョンのリリースノート" - -msgid "Other Notes" -msgstr "その他の注意点" - -msgid "" -"Patch was added to address inconsistent RBAC policy handling. Certain rules " -"may not have been given appropriate access." -msgstr "" -"矛盾した RBAC ポリシー処理に対処するためのパッチが追加されました。特定のルー" -"ルに適切なアクセス権が与えられていない可能性があります。" - -msgid "" -"Queue based communication is the new default IPC protocol. RPC can still be " -"used by choosing rpc as ipc_protocol option. Only one protocol can be run at " -"any given time." -msgstr "" -"キューベースの通信は、新しいデフォルトの IPC プロトコルです。 RPC は " -"ipc_protocol オプションとして rpc を選択することによって引き続き使用できま" -"す。一度に実行できるプロトコルは1つのみです。" - -msgid "Remove eventlet from Aodh in favour of threaded approach" -msgstr "Aodh からイベントレットを削除し、スレッド化するアプローチにします。" - -msgid "Security Issues" -msgstr "セキュリティー上の問題" - -msgid "Start using reno to manage release notes." -msgstr "リリースノートの管理に reno を使い始めました。" - -msgid "" -"Support for queue based communication between alarm evaluator service and " -"alarm notifier services was added. Original implementation involved using " -"RPC but there is significant overhead involved with using RPC. Work queues " -"provided required functionality with better performance." -msgstr "" -"アラーム評価サービスとアラーム通知サービスの間のキューベースの通信のサポート" -"が追加されました。 元の実装では RPC を使用していましたが、 RPC の使用にはかな" -"りのオーバーヘッドがあります。 作業キューは、より良いパフォーマンスで必要な機" -"能を提供します。" - -msgid "The api.pecan_debug option has been removed." -msgstr "api.pecan_debug オプションを削除しました。" - -msgid "" -"The combination alarms are officially deprecated and disabled by default. " -"Set api.enable_combination_alarms to True to enable them. Existing alarms " -"will still be evaluated, but access to them via the API is linked to whether " -"that configuration option is turned on or off. It's advised to use composite " -"alarms instead." -msgstr "" -"コンビネーションアラームは公式に非推奨となり、デフォルトでは無効になっていま" -"す。 有効にするには、 api.enable_combination_alarms を True に設定します。既" -"存のアラームは引き続き評価されますが、 API 経由でのアクセスはその設定オプショ" -"ンのオン/オフによります。代わりに複合アラームを使用することをお勧めします。" - -msgid "The deprecated combination alarms support have been removed." -msgstr "非推奨となっていたコンビネーションアラームを削除しました。" - -msgid "Upgrade Notes" -msgstr "アップグレード時の注意" - -msgid "" -"[`bug 1504495 `_] Patch " -"was added to address inconsistent RBAC policy handling. Certain rules may " -"not have been given appropriate access." -msgstr "" -"[`bug 1504495 `_] 矛盾し" -"た RBAC ポリシー処理に対処するためのパッチが追加されました。特定のルールに適" -"切なアクセス権が与えられていない可能性があります。" - -msgid "" -"[`bug 1511252 `_] Fix an issue " -"with combination alarms where it fails to evaluate all issues in the chain " -"of alarms." -msgstr "" -"[`bug 1511252 `_] コンビネー" -"ションアラームのチェーン内のすべてのアラームを評価できない問題を修正しまし" -"た。" - -msgid "" -"[`bug 1513738 `_] Fix an issue " -"where alarms using Gnocchi aggregations are not being evaluated." 
-msgstr "" -"[`bug 1513738 `_] Gnocchi の集" -"計を使用したアラームが評価されない問題を修正しました。" - -msgid "" -"[`bug 1518447 `_] Fix to " -"ensure ceilometerclient is properly initialised on startup." -msgstr "" -"[`bug 1518447 `_] " -"ceilometerclient が起動時に適切に初期化されるようにしました。" - -msgid "" -"[`bug 1539069 `_] Fix to " -"handle scenario where no valid statistics exist for specified period." -msgstr "" -"[`bug 1539069 `_] 指定した期間" -"に有効な統計が存在しないシナリオの処理を修正しました。" - -msgid "" -"[`bug 1540395 `_] Fix reason " -"string to properly handle transitions when one sample is outside of defined " -"threshold." -msgstr "" -"[`bug 1540395 `_] 定義した閾値" -"の外側にある1つのサンプルが翻訳を適切に処理するよう、理由の文字列を修正しま" -"した。" - -msgid "" -"[`bug 1575530 `_] Patch was " -"added to fix and improve the partition coordinator, make sure the input " -"tasks can be correctly distributed to partition members." -msgstr "" -"[`bug 1575530 `_] パーティショ" -"ンコーディネーターを修正し改善するためのパッチを追加しました。入力タスクが" -"パーティションメンバーに正しく配布されるようにしてください。" - -msgid "" -"[`bug 1582131 `_] Fix an issue " -"with adding CA_BUNDLE certificate parth as value of \"verify\" parameter in " -"SSL requests." -msgstr "" -"[`bug 1582131 `_] SSL リクエス" -"トの \"verify\" パラメーターの値として CA_BUNDLE 証明書のパスを追加する際の問" -"題を修正しました。" - -msgid "" -"batch_size and batch_timeout configuration options are added to [listener] " -"section of configuration. The batch_size controls the number of messages to " -"grab before processing. Similarly, the batch_timeout defines the wait time " -"before processing." -msgstr "" -"batch_size および batch_timeout 設定オプションを設定の [listener] セクション" -"に追加しました。batch_size は、処理する前に取得するメッセージの数を制御しま" -"す。同様に、batch_timeout は処理前の待機時間を定義します。" - -msgid "" -"batch_size and batch_timeout configuration options are added to [notifier] " -"section of configuration. The batch_size controls the number of messages to " -"grab before processing. Similarly, the batch_timeout defines the wait time " -"before processing." -msgstr "" -"batch_size および batch_timeout 設定オプションを設定の [notifier] セクション" -"に追加しました。batch_size は、処理する前に取得するメッセージの数を制御しま" -"す。同様に、batch_timeout は処理前の待機時間を定義します。" - -msgid "gnocchiclient library is now a requirement if using ceilometer+gnocchi." -msgstr "" -"ceilometer と gnocchi を使う場合、 gnocchiclient ライブラリーが必要になりまし" -"た。" diff --git a/releasenotes/source/mitaka.rst b/releasenotes/source/mitaka.rst deleted file mode 100644 index 0dc585c8..00000000 --- a/releasenotes/source/mitaka.rst +++ /dev/null @@ -1,6 +0,0 @@ -============================= - Mitaka Series Release Notes -============================= - -.. release-notes:: - :branch: origin/stable/mitaka diff --git a/releasenotes/source/newton.rst b/releasenotes/source/newton.rst deleted file mode 100644 index 7b7d7352..00000000 --- a/releasenotes/source/newton.rst +++ /dev/null @@ -1,6 +0,0 @@ -============================= - Newton Series Release Notes -============================= - -.. release-notes:: - :branch: origin/stable/newton diff --git a/releasenotes/source/ocata.rst b/releasenotes/source/ocata.rst deleted file mode 100644 index 9515f6cf..00000000 --- a/releasenotes/source/ocata.rst +++ /dev/null @@ -1,6 +0,0 @@ -============================ - Ocata Series Release Notes -============================ - -.. release-notes:: - :branch: origin/stable/ocata diff --git a/releasenotes/source/unreleased.rst b/releasenotes/source/unreleased.rst deleted file mode 100644 index a3374bc8..00000000 --- a/releasenotes/source/unreleased.rst +++ /dev/null @@ -1,5 +0,0 @@ -============================== - Current Series Release Notes -============================== - -.. 
release-notes:: diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index b73df314..00000000 --- a/requirements.txt +++ /dev/null @@ -1,37 +0,0 @@ -# The order of packages is significant, because pip processes them in the order -# of appearance. Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later. - -tenacity>=3.2.1 # Apache-2.0 -croniter>=0.3.4 # MIT License -futures>=3.0;python_version=='2.7' or python_version=='2.6' # BSD -futurist>=0.11.0 # Apache-2.0 -jsonschema!=2.5.0,<3.0.0,>=2.0.0 -keystonemiddleware>=2.2.0 -gnocchiclient>=2.1.0 # Apache-2.0 -lxml>=2.3 -oslo.db>=4.8.0,!=4.13.1,!=4.13.2,!=4.15.0 # Apache-2.0 -oslo.config>=2.6.0 # Apache-2.0 -oslo.i18n>=1.5.0 # Apache-2.0 -oslo.log>=1.2.0 # Apache-2.0 -oslo.policy>=0.5.0 # Apache-2.0 -PasteDeploy>=1.5.0 -pbr>=2.0.0 # Apache-2.0 -pecan>=0.8.0 -oslo.messaging>=5.2.0 # Apache-2.0 -oslo.middleware>=3.22.0 # Apache-2.0 -oslo.serialization>=1.4.0 # Apache-2.0 -oslo.utils>=3.5.0 # Apache-2.0 -python-ceilometerclient>=1.5.0 -python-keystoneclient>=1.6.0 -pytz>=2013.6 -requests>=2.5.2 -six>=1.9.0 -stevedore>=1.5.0 # Apache-2.0 -tooz>=1.28.0 # Apache-2.0 -WebOb>=1.2.3 -WSME>=0.8 -cachetools>=1.1.6 -cotyledon -keystoneauth1>=2.1 -debtcollector>=1.2.0 # Apache-2.0 diff --git a/run-functional-tests.sh b/run-functional-tests.sh deleted file mode 100755 index 6720dc9d..00000000 --- a/run-functional-tests.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash -x -set -e - -cleanup(){ - type -t database_stop >/dev/null && database_stop || true -} -trap cleanup EXIT - -export GABBI_LIVE_FAIL_IF_NO_TEST=1 -export OS_TEST_PATH=aodh/tests/functional_live/ -export AODH_SERVICE_TOKEN=foobar # Needed for gabbi -export AODH_SERVICE_ROLES=admin - -AODH_TEST_DRIVERS=${AODH_TEST_DRIVERS:-postgresql} -for indexer in ${AODH_TEST_DRIVERS} -do - eval $(pifpaf -e DATABASE run $indexer) - pifpaf -e AODH run aodh --database-url $DATABASE_URL -- ./tools/pretty_tox.sh $* - cleanup -done diff --git a/run-tests.sh b/run-tests.sh deleted file mode 100755 index b0484761..00000000 --- a/run-tests.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash -x -set -e -AODH_TEST_DRIVERS=${AODH_TEST_DRIVERS:-postgresql} -for indexer in ${AODH_TEST_DRIVERS} -do - pifpaf -g AODH_TEST_STORAGE_URL run $indexer -- ./tools/pretty_tox.sh $* -done diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index 0dfff90f..00000000 --- a/setup.cfg +++ /dev/null @@ -1,151 +0,0 @@ -[metadata] -name = aodh -url = http://launchpad.net/aodh -summary = OpenStack Telemetry Alarming -description-file = - README.rst -author = OpenStack -author-email = openstack-dev@lists.openstack.org -home-page = https://docs.openstack.org/aodh/latest/ -classifier = - Environment :: OpenStack - Intended Audience :: Information Technology - Intended Audience :: System Administrators - License :: OSI Approved :: Apache Software License - Operating System :: POSIX :: Linux - Programming Language :: Python - Programming Language :: Python :: 2 - Programming Language :: Python :: 2.7 - Programming Language :: Python :: 3 - Programming Language :: Python :: 3.5 - Topic :: System :: Monitoring - -[global] -setup-hooks = - pbr.hooks.setup_hook - -[files] -packages = - aodh - -[extras] -mysql = - SQLAlchemy>=0.9.7 - sqlalchemy-utils - alembic>=0.7.2 - PyMySQL>=0.6.2 # MIT License - -postgresql = - SQLAlchemy>=0.9.7 - sqlalchemy-utils - alembic>=0.7.2 - psycopg2 - -zaqar = - python-zaqarclient>=1.2.0 - -doc = - openstackdocstheme>=1.11.0 # Apache-2.0 - 
reno>=0.1.1 # Apache2 - sphinx>=1.6.2 - sphinxcontrib-httpdomain - sphinxcontrib-pecanwsme>=0.8 - -test = - pifpaf[gnocchi]>=1.0.1 - oslotest>=2.15.0 # Apache-2.0 - coverage>=3.6 - fixtures>=1.3.1 - mock>=1.0 - sqlalchemy-utils - tempest>=11.0.0 # Apache-2.0 - testrepository>=0.0.18 - testresources>=0.2.4 # Apache-2.0/BSD - gabbi>=1.30.0 # Apache-2.0 - # Provides subunit-trace - os-testr - python-subunit>=0.0.18 - webtest - gnocchi[postgresql,file] - -[entry_points] -aodh.storage = - log = aodh.storage.impl_log:Connection - mysql = aodh.storage.impl_sqlalchemy:Connection - mysql+pymysql = aodh.storage.impl_sqlalchemy:Connection - postgresql = aodh.storage.impl_sqlalchemy:Connection - sqlite = aodh.storage.impl_sqlalchemy:Connection - -aodh.alarm.rule = - threshold = aodh.api.controllers.v2.alarm_rules.threshold:AlarmThresholdRule - gnocchi_resources_threshold = aodh.api.controllers.v2.alarm_rules.gnocchi:MetricOfResourceRule - gnocchi_aggregation_by_metrics_threshold = aodh.api.controllers.v2.alarm_rules.gnocchi:AggregationMetricsByIdLookupRule - gnocchi_aggregation_by_resources_threshold = aodh.api.controllers.v2.alarm_rules.gnocchi:AggregationMetricByResourcesLookupRule - event = aodh.api.controllers.v2.alarm_rules.event:AlarmEventRule - composite = aodh.api.controllers.v2.alarm_rules.composite:composite_rule - -aodh.evaluator = - threshold = aodh.evaluator.threshold:ThresholdEvaluator - gnocchi_resources_threshold = aodh.evaluator.gnocchi:GnocchiResourceThresholdEvaluator - gnocchi_aggregation_by_metrics_threshold = aodh.evaluator.gnocchi:GnocchiAggregationMetricsThresholdEvaluator - gnocchi_aggregation_by_resources_threshold = aodh.evaluator.gnocchi:GnocchiAggregationResourcesThresholdEvaluator - composite = aodh.evaluator.composite:CompositeEvaluator - -aodh.notifier = - log = aodh.notifier.log:LogAlarmNotifier - test = aodh.notifier.test:TestAlarmNotifier - http = aodh.notifier.rest:RestAlarmNotifier - https = aodh.notifier.rest:RestAlarmNotifier - trust+http = aodh.notifier.trust:TrustRestAlarmNotifier - trust+https = aodh.notifier.trust:TrustRestAlarmNotifier - zaqar = aodh.notifier.zaqar:ZaqarAlarmNotifier - trust+zaqar = aodh.notifier.zaqar:TrustZaqarAlarmNotifier - -wsgi_scripts = - aodh-api = aodh.api.app:build_wsgi_app - -console_scripts = - aodh-dbsync = aodh.cmd.storage:dbsync - aodh-expirer = aodh.cmd.storage:expirer - aodh-evaluator = aodh.cmd.alarm:evaluator - aodh-notifier = aodh.cmd.alarm:notifier - aodh-listener = aodh.cmd.alarm:listener - aodh-config-generator = aodh.cmd:config_generator - -oslo.config.opts = - aodh = aodh.opts:list_opts - aodh-auth = aodh.opts:list_keystoneauth_opts - -oslo.config.opts.defaults = - aodh = aodh.conf.defaults:set_cors_middleware_defaults - -tempest.test_plugins = - aodh_tests = aodh.tests.tempest.plugin:AodhTempestPlugin - -[build_sphinx] -all_files = 1 -build-dir = doc/build -source-dir = doc/source -warning-is-error = 1 - -[pbr] -warnerrors = true -autodoc_index_modules = true -autodoc_exclude_modules = - aodh.storage.sqlalchemy.alembic.* - aodh.tests.* -api_doc_dir = contributor/api - -[extract_messages] -keywords = _ gettext ngettext l_ lazy_gettext -mapping_file = babel.cfg -output_file = aodh/locale/aodh.pot - -[compile_catalog] -directory = aodh/locale -domain = aodh - -[update_catalog] -domain = aodh -output_dir = aodh/locale -input_file = aodh/locale/aodh.pot diff --git a/setup.py b/setup.py deleted file mode 100644 index 056c16c2..00000000 --- a/setup.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (c) 2013 Hewlett-Packard 
Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT -import setuptools - -# In python < 2.7.4, a lazy loading of package `pbr` will break -# setuptools if some other modules registered functions in `atexit`. -# solution from: http://bugs.python.org/issue15881#msg170215 -try: - import multiprocessing # noqa -except ImportError: - pass - -setuptools.setup( - setup_requires=['pbr'], - pbr=True) diff --git a/tools/pretty_tox.sh b/tools/pretty_tox.sh deleted file mode 100755 index b96dc3f2..00000000 --- a/tools/pretty_tox.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/usr/bin/env bash - -set -o pipefail - -TESTRARGS=$* - -# --until-failure is not compatible with --subunit, see: -# -# https://bugs.launchpad.net/testrepository/+bug/1411804 -# -# this workaround exists until that is addressed -if [[ "$TESTRARGS" =~ "until-failure" ]]; then - ostestr --slowest $TESTRARGS -else - ostestr --no-pretty --slowest --subunit $TESTRARGS | subunit-trace -f -fi diff --git a/tox.ini b/tox.ini deleted file mode 100644 index 05cea4fa..00000000 --- a/tox.ini +++ /dev/null @@ -1,73 +0,0 @@ -[tox] -minversion = 1.6 -skipsdist = True -envlist = py{27,35}{,-mysql,-postgresql},functional,pep8 - -[testenv] -# NOTE(tonyb): This project has chosen to *NOT* consume upper-constraints.txt -usedevelop = True -setenv = - AODH_TEST_DRIVERS=postgresql mysql - mysql: AODH_TEST_DRIVERS=mysql - postgresql: AODH_TEST_DRIVERS=postgresql - - AODH_TEST_DEPS=postgresql,mysql - mysql: AODH_TEST_DEPS=mysql - postgresql: AODH_TEST_DEPS=postgresql -deps = .[test,{env:AODH_TEST_DEPS}] -passenv = OS_TEST_TIMEOUT OS_STDOUT_CAPTURE OS_STDERR_CAPTURE OS_LOG_CAPTURE AODH_TEST_DRIVERS -commands = - {toxinidir}/run-tests.sh --black-regex '^aodh\.tests\.tempest\..*$' {posargs} - {toxinidir}/run-functional-tests.sh "{posargs}" - aodh-config-generator - -[testenv:cover] -commands = - pifpaf -g AODH_TEST_STORAGE_URL run mysql -- python setup.py testr --slowest --coverage --testr-args="{posargs}" - coverage report - -[testenv:pep8] -deps = hacking>=0.12,<0.13 -commands = - flake8 - # Check that .po and .pot files are valid: - bash -c "find aodh -type f -regex '.*\.pot?' 
-print0|xargs -0 -n 1 msgfmt --check-format -o /dev/null" - -[testenv:releasenotes] -deps = .[doc] -commands = sphinx-build -a -E -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html - -[testenv:docs] -deps = .[test,doc] -commands = python setup.py build_sphinx -setenv = PYTHONHASHSEED=0 - -[testenv:venv] -deps = .[doc] -commands = {posargs} -setenv = PYTHONHASHSEED=0 - -[testenv:debug] -commands = bash -x oslo_debug_helper {posargs} - -[testenv:debug-mysql] -deps = .[mysql,test] -setenv = OS_TEST_PATH=aodh/tests/functional/ -commands = pifpaf -g AODH_TEST_STORAGE_URL run mysql -- oslo_debug_helper {posargs} - -[testenv:debug-pgsql] -deps = .[postgresql,test] -setenv = OS_TEST_PATH=aodh/tests/functional/ -commands = pifpaf -g AODH_TEST_STORAGE_URL run postgresql -- oslo_debug_helper {posargs} - -[flake8] -ignore = -exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build -# [H106] Don't put vim configuration in source files. -# [H203] Use assertIs(Not)None to check for None. -enable-extensions=H106,H203 -show-source = True - -[hacking] -import_exceptions = - aodh.i18n
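For reference, the tox environments defined in the removed tox.ini were typically driven as follows; a minimal sketch, assuming tox and pifpaf are installed in the environment:

    # Run unit tests against one backend, the style checks, and the
    # release-notes build, matching the envlist defined above.
    tox -e py27-mysql
    tox -e pep8
    tox -e releasenotes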
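The healthcheck release note above can be exercised directly against a running aodh-api; a sketch only, where the host and port (localhost:8042) are assumptions to adjust for your deployment:

    # Probe the oslo_middleware healthcheck endpoint provided by default.
    curl -s http://localhost:8042/healthcheck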
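The composite-alarm release note above is easiest to see with a concrete rule; a hedged sketch using the aodh client, where the alarm name, metrics, thresholds and INSTANCE_ID are purely illustrative and exact client flags may differ by release:

    # Create a composite alarm that fires when either Gnocchi threshold
    # condition is met (an "or" relationship between two rules);
    # INSTANCE_ID and all values are placeholders.
    aodh alarm create --name cpu_or_mem_high --type composite \
      --composite-rule '{"or": [
        {"type": "gnocchi_resources_threshold", "metric": "cpu_util",
         "aggregation_method": "mean", "comparison_operator": "gt",
         "threshold": 80.0, "resource_type": "instance",
         "resource_id": "INSTANCE_ID"},
        {"type": "gnocchi_resources_threshold", "metric": "memory.usage",
         "aggregation_method": "mean", "comparison_operator": "gt",
         "threshold": 512, "resource_type": "instance",
         "resource_id": "INSTANCE_ID"}]}'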
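The batching and multi-worker release notes above translate to ordinary aodh.conf settings; a sketch with illustrative values, assuming the conventional /etc/aodh/aodh.conf path:

    # Append worker and batching options for the three aodh services;
    # all numbers here are examples, not recommendations.
    cat >> /etc/aodh/aodh.conf <<'EOF'
    [evaluator]
    workers = 2

    [listener]
    workers = 2
    batch_size = 10
    batch_timeout = 5

    [notifier]
    workers = 2
    batch_size = 10
    batch_timeout = 5
    EOF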